5.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 13:35:38 +0000 (15:35 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 13:35:38 +0000 (15:35 +0200)
added patches:
x86-mm-ptdump-calculate-effective-permissions-correctly.patch
x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch
x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch
x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch
x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch
x86_64-fix-jiffies-odr-violation.patch

queue-5.6/series
queue-5.6/x86-mm-ptdump-calculate-effective-permissions-correctly.patch [new file with mode: 0644]
queue-5.6/x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch [new file with mode: 0644]
queue-5.6/x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch [new file with mode: 0644]
queue-5.6/x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch [new file with mode: 0644]
queue-5.6/x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch [new file with mode: 0644]
queue-5.6/x86_64-fix-jiffies-odr-violation.patch [new file with mode: 0644]

diff --git a/queue-5.6/series b/queue-5.6/series
index 9e3573771f2070cbcfeae80d2749d5a691fe42da..02e94e7a57b40b586e47f9bf1b9431a6261a4a9b 100644 (file)
@@ -45,3 +45,9 @@ drm-amd-display-remove-invalid-dc_is_hw_initialized-.patch
 drm-amd-display-not-doing-optimize-bandwidth-if-flip.patch
 aio-fix-async-fsync-creds.patch
 usercopy-mark-dma-kmalloc-caches-as-usercopy-caches.patch
+x86_64-fix-jiffies-odr-violation.patch
+x86-mm-ptdump-calculate-effective-permissions-correctly.patch
+x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch
+x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch
+x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch
+x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch
diff --git a/queue-5.6/x86-mm-ptdump-calculate-effective-permissions-correctly.patch b/queue-5.6/x86-mm-ptdump-calculate-effective-permissions-correctly.patch
new file mode 100644 (file)
index 0000000..7a66c07
--- /dev/null
@@ -0,0 +1,188 @@
+From 1494e0c38ee903e83aefb58caf54a9217273d49a Mon Sep 17 00:00:00 2001
+From: Steven Price <steven.price@arm.com>
+Date: Mon, 1 Jun 2020 21:49:58 -0700
+Subject: x86: mm: ptdump: calculate effective permissions correctly
+
+From: Steven Price <steven.price@arm.com>
+
+commit 1494e0c38ee903e83aefb58caf54a9217273d49a upstream.
+
+Patch series "Fix W+X debug feature on x86"
+
+Jan alerted me[1] that the W+X detection debug feature was broken in x86
+by my change[2] to switch x86 to use the generic ptdump infrastructure.
+
+Fundamentally the approach of trying to move the calculation of
+effective permissions into note_page() was broken because note_page() is
+only called for 'leaf' entries and the effective permissions are passed
+down via the internal nodes of the page tree.  The solution I've taken
+here is to create a new (optional) callback which is called for all
+nodes of the page tree and therefore can calculate the effective
+permissions.
+
+Secondly, on some configurations (32-bit with PAE) "unsigned long" is not
+large enough to store the table entries.  The fix here is simple - let's
+just use a u64.
+
+[1] https://lore.kernel.org/lkml/d573dc7e-e742-84de-473d-f971142fa319@suse.com/
+[2] 2ae27137b2db ("x86: mm: convert dump_pagetables to use walk_page_range")
+
+This patch (of 2):
+
+By switching the x86 page table dump code to use the generic code, the
+effective permissions are no longer calculated correctly because the
+note_page() function is only called for *leaf* entries.  To calculate
+the actual effective permissions it is necessary to observe the full
+hierarchy of the page tree.
+
+Introduce a new callback for ptdump which is called for every entry and
+can therefore update the prot_levels array correctly.  note_page() can
+then simply access the appropriate element in the array.
+
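+For illustration, a hedged sketch of how a dumper wires up the new
+optional hook next to note_page() (names mirror the x86 diff below;
+this is a sketch, not the literal upstream code):
+
+	#include <linux/ptdump.h>
+
+	/* Called for each entry the walker visits, at every level, so it
+	 * can accumulate effective permissions down the page-table tree. */
+	static void effective_prot(struct ptdump_state *st, int level, u64 val);
+	/* Still called only for leaf entries. */
+	static void note_page(struct ptdump_state *st, unsigned long addr,
+			      int level, unsigned long val);
+
+	extern const struct ptdump_range ptdump_ranges[];
+
+	struct pg_state st = {
+		.ptdump = {
+			.note_page      = note_page,
+			.effective_prot = effective_prot,	/* optional */
+			.range          = ptdump_ranges,
+		},
+	};
+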
+[steven.price@arm.com: make the assignment conditional on val != 0]
+  Link: http://lkml.kernel.org/r/430c8ab4-e7cd-6933-dde6-087fac6db872@arm.com
+Fixes: 2ae27137b2db ("x86: mm: convert dump_pagetables to use walk_page_range")
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Qian Cai <cai@lca.pw>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200521152308.33096-1-steven.price@arm.com
+Link: http://lkml.kernel.org/r/20200521152308.33096-2-steven.price@arm.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/dump_pagetables.c |   33 ++++++++++++++++++++-------------
+ include/linux/ptdump.h        |    1 +
+ mm/ptdump.c                   |   17 ++++++++++++++++-
+ 3 files changed, 37 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -249,10 +249,22 @@ static void note_wx(struct pg_state *st,
+                 (void *)st->start_address);
+ }
+-static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
++static void effective_prot(struct ptdump_state *pt_st, int level, u64 val)
+ {
+-      return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
+-             ((prot1 | prot2) & _PAGE_NX);
++      struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
++      pgprotval_t prot = val & PTE_FLAGS_MASK;
++      pgprotval_t effective;
++
++      if (level > 0) {
++              pgprotval_t higher_prot = st->prot_levels[level - 1];
++
++              effective = (higher_prot & prot & (_PAGE_USER | _PAGE_RW)) |
++                          ((higher_prot | prot) & _PAGE_NX);
++      } else {
++              effective = prot;
++      }
++
++      st->prot_levels[level] = effective;
+ }
+ /*
+@@ -270,16 +282,10 @@ static void note_page(struct ptdump_stat
+       struct seq_file *m = st->seq;
+       new_prot = val & PTE_FLAGS_MASK;
+-
+-      if (level > 0) {
+-              new_eff = effective_prot(st->prot_levels[level - 1],
+-                                       new_prot);
+-      } else {
+-              new_eff = new_prot;
+-      }
+-
+-      if (level >= 0)
+-              st->prot_levels[level] = new_eff;
++      if (!val)
++              new_eff = 0;
++      else
++              new_eff = st->prot_levels[level];
+       /*
+        * If we have a "break" in the series, we need to flush the state that
+@@ -374,6 +380,7 @@ static void ptdump_walk_pgd_level_core(s
+       struct pg_state st = {
+               .ptdump = {
+                       .note_page      = note_page,
++                      .effective_prot = effective_prot,
+                       .range          = ptdump_ranges
+               },
+               .level = -1,
+--- a/include/linux/ptdump.h
++++ b/include/linux/ptdump.h
+@@ -14,6 +14,7 @@ struct ptdump_state {
+       /* level is 0:PGD to 4:PTE, or -1 if unknown */
+       void (*note_page)(struct ptdump_state *st, unsigned long addr,
+                         int level, unsigned long val);
++      void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+       const struct ptdump_range *range;
+ };
+--- a/mm/ptdump.c
++++ b/mm/ptdump.c
+@@ -36,6 +36,9 @@ static int ptdump_pgd_entry(pgd_t *pgd,
+               return note_kasan_page_table(walk, addr);
+ #endif
++      if (st->effective_prot)
++              st->effective_prot(st, 0, pgd_val(val));
++
+       if (pgd_leaf(val))
+               st->note_page(st, addr, 0, pgd_val(val));
+@@ -53,6 +56,9 @@ static int ptdump_p4d_entry(p4d_t *p4d,
+               return note_kasan_page_table(walk, addr);
+ #endif
++      if (st->effective_prot)
++              st->effective_prot(st, 1, p4d_val(val));
++
+       if (p4d_leaf(val))
+               st->note_page(st, addr, 1, p4d_val(val));
+@@ -70,6 +76,9 @@ static int ptdump_pud_entry(pud_t *pud,
+               return note_kasan_page_table(walk, addr);
+ #endif
++      if (st->effective_prot)
++              st->effective_prot(st, 2, pud_val(val));
++
+       if (pud_leaf(val))
+               st->note_page(st, addr, 2, pud_val(val));
+@@ -87,6 +96,8 @@ static int ptdump_pmd_entry(pmd_t *pmd,
+               return note_kasan_page_table(walk, addr);
+ #endif
++      if (st->effective_prot)
++              st->effective_prot(st, 3, pmd_val(val));
+       if (pmd_leaf(val))
+               st->note_page(st, addr, 3, pmd_val(val));
+@@ -97,8 +108,12 @@ static int ptdump_pte_entry(pte_t *pte,
+                           unsigned long next, struct mm_walk *walk)
+ {
+       struct ptdump_state *st = walk->private;
++      pte_t val = READ_ONCE(*pte);
++
++      if (st->effective_prot)
++              st->effective_prot(st, 4, pte_val(val));
+-      st->note_page(st, addr, 4, pte_val(READ_ONCE(*pte)));
++      st->note_page(st, addr, 4, pte_val(val));
+       return 0;
+ }
diff --git a/queue-5.6/x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch b/queue-5.6/x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch
new file mode 100644 (file)
index 0000000..c2e8fbf
--- /dev/null
@@ -0,0 +1,45 @@
+From 1574051e52cb4b5b7f7509cfd729b76ca1117808 Mon Sep 17 00:00:00 2001
+From: Xiaochun Lee <lixc17@lenovo.com>
+Date: Thu, 14 May 2020 23:31:07 -0400
+Subject: x86/PCI: Mark Intel C620 MROMs as having non-compliant BARs
+
+From: Xiaochun Lee <lixc17@lenovo.com>
+
+commit 1574051e52cb4b5b7f7509cfd729b76ca1117808 upstream.
+
+The Intel C620 Platform Controller Hub has MROM functions that have non-PCI
+registers (undocumented in the public spec) where BAR 0 is supposed to be,
+which results in messages like this:
+
+  pci 0000:00:11.0: [Firmware Bug]: reg 0x30: invalid BAR (can't size)
+
+Mark these MROM functions as having non-compliant BARs so we don't try to
+probe any of them.  There are no other BARs on these devices.
+
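+For context, the pci_invalid_bar() quirk these device IDs are attached
+to below is tiny; roughly (as found in arch/x86/pci/fixup.c at the time):
+
+	/* Flag the device so the PCI core skips probing its BARs. */
+	static void pci_invalid_bar(struct pci_dev *dev)
+	{
+		dev->non_compliant_bars = 1;
+	}
+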
+See the Intel C620 Series Chipset Platform Controller Hub Datasheet,
+May 2019, Document Number 336067-007US, sec 2.1, 35.5, 35.6.
+
+[bhelgaas: commit log, add 0xa26d]
+Link: https://lore.kernel.org/r/1589513467-17070-1-git-send-email-lixiaochun.2888@163.com
+Signed-off-by: Xiaochun Lee <lixc17@lenovo.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/pci/fixup.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
+ /*
+  * Device [1022:7808]
diff --git a/queue-5.6/x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch b/queue-5.6/x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch
new file mode 100644 (file)
index 0000000..c24f5ff
--- /dev/null
@@ -0,0 +1,222 @@
+From 21998a351512eba4ed5969006f0c55882d995ada Mon Sep 17 00:00:00 2001
+From: Anthony Steinhauser <asteinhauser@google.com>
+Date: Tue, 19 May 2020 06:40:42 -0700
+Subject: x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
+
+From: Anthony Steinhauser <asteinhauser@google.com>
+
+commit 21998a351512eba4ed5969006f0c55882d995ada upstream.
+
+When STIBP is unavailable or enhanced IBRS is available, Linux
+force-disables the IBPB mitigation of Spectre-BTB even when simultaneous
+multithreading is disabled. While attempts to enable IBPB using
+prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, ...) fail with
+EPERM, the seccomp syscall (or its prctl(PR_SET_SECCOMP, ...) equivalent),
+which is used e.g. by Chromium or OpenSSH, succeeds with no errors, but the
+application remains silently vulnerable to cross-process Spectre v2 attacks
+(classical BTB poisoning). At the same time the SYSFS reporting
+(/sys/devices/system/cpu/vulnerabilities/spectre_v2) displays that IBPB is
+conditionally enabled when in fact it is unconditionally disabled.
+
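+As a hedged illustration of the userspace path involved (a sketch, not
+part of the upstream changelog; assumes a libc whose <sys/prctl.h> pulls
+in the PR_SPEC_* constants, otherwise include <linux/prctl.h> as well):
+
+	#include <errno.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <sys/prctl.h>
+
+	int main(void)
+	{
+		/* Ask for per-task IBPB protection (indirect branch
+		 * speculation off for this task). */
+		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+			  PR_SPEC_DISABLE, 0, 0))
+			fprintf(stderr, "prctl: %s\n", strerror(errno));
+
+		/* Query the state; before this fix a seccomp/prctl request
+		 * could appear to succeed while IBPB stayed disabled. */
+		printf("ib ctrl: 0x%x\n",
+		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+			     0, 0, 0));
+		return 0;
+	}
+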
+STIBP is useful only when SMT is enabled. When SMT is disabled and STIBP is
+unavailable, it makes no sense to also force-disable IBPB, because IBPB
+protects against cross-process Spectre-BTB attacks regardless of the SMT
+state. At the same time, missing STIBP has only been observed on AMD CPUs,
+and AMD recommends using IBPB rather than STIBP, so disabling
+IBPB because of missing STIBP goes directly against AMD's advice:
+https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf
+
+Similarly, enhanced IBRS is designed to protect against cross-core BTB
+poisoning and against BTB-poisoning attacks from user space against the
+kernel (and from a guest against the hypervisor); it is not designed
+to prevent cross-process (or cross-VM) BTB poisoning between processes (or
+VMs) running on the same core. Therefore, even with enhanced IBRS it is
+necessary to flush the BTB during context-switches, so there is no reason
+to force disable IBPB when enhanced IBRS is available.
+
+Enable the prctl control of IBPB even when STIBP is unavailable or enhanced
+IBRS is available.
+
+Fixes: 7cc765a67d8e ("x86/speculation: Enable prctl mode for spectre_v2_user")
+Signed-off-by: Anthony Steinhauser <asteinhauser@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c |   87 +++++++++++++++++++++++++--------------------
+ 1 file changed, 50 insertions(+), 37 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -588,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
++      SPECTRE_V2_USER_NONE;
++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
+       SPECTRE_V2_USER_NONE;
+ #ifdef CONFIG_RETPOLINE
+@@ -734,15 +736,6 @@ spectre_v2_user_select_mitigation(enum s
+               break;
+       }
+-      /*
+-       * At this point, an STIBP mode other than "off" has been set.
+-       * If STIBP support is not being forced, check if STIBP always-on
+-       * is preferred.
+-       */
+-      if (mode != SPECTRE_V2_USER_STRICT &&
+-          boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+-              mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+-
+       /* Initialize Indirect Branch Prediction Barrier */
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+@@ -765,23 +758,36 @@ spectre_v2_user_select_mitigation(enum s
+               pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+                       static_key_enabled(&switch_mm_always_ibpb) ?
+                       "always-on" : "conditional");
++
++              spectre_v2_user_ibpb = mode;
+       }
+-      /* If enhanced IBRS is enabled no STIBP required */
+-      if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++      /*
++       * If enhanced IBRS is enabled or SMT impossible, STIBP is not
++       * required.
++       */
++      if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return;
+       /*
+-       * If SMT is not possible or STIBP is not available clear the STIBP
+-       * mode.
++       * At this point, an STIBP mode other than "off" has been set.
++       * If STIBP support is not being forced, check if STIBP always-on
++       * is preferred.
++       */
++      if (mode != SPECTRE_V2_USER_STRICT &&
++          boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
++              mode = SPECTRE_V2_USER_STRICT_PREFERRED;
++
++      /*
++       * If STIBP is not available, clear the STIBP mode.
+        */
+-      if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
++      if (!boot_cpu_has(X86_FEATURE_STIBP))
+               mode = SPECTRE_V2_USER_NONE;
++
++      spectre_v2_user_stibp = mode;
++
+ set_mode:
+-      spectre_v2_user = mode;
+-      /* Only print the STIBP mode when SMT possible */
+-      if (smt_possible)
+-              pr_info("%s\n", spectre_v2_user_strings[mode]);
++      pr_info("%s\n", spectre_v2_user_strings[mode]);
+ }
+ static const char * const spectre_v2_strings[] = {
+@@ -1014,7 +1020,7 @@ void cpu_bugs_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+-      switch (spectre_v2_user) {
++      switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
+@@ -1257,14 +1263,16 @@ static int ib_prctl_set(struct task_stru
+ {
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+-              if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+                * mode.
+                */
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+@@ -1275,10 +1283,12 @@ static int ib_prctl_set(struct task_stru
+                * Indirect branch speculation is always allowed when
+                * mitigation is force disabled.
+                */
+-              if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return 0;
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+@@ -1309,7 +1319,8 @@ void arch_seccomp_spec_mitigate(struct t
+ {
+       if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+               ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+-      if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
++      if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
+               ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+@@ -1340,22 +1351,24 @@ static int ib_prctl_get(struct task_stru
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return PR_SPEC_NOT_AFFECTED;
+-      switch (spectre_v2_user) {
+-      case SPECTRE_V2_USER_NONE:
++      if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++          spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+               return PR_SPEC_ENABLE;
+-      case SPECTRE_V2_USER_PRCTL:
+-      case SPECTRE_V2_USER_SECCOMP:
++      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++              return PR_SPEC_DISABLE;
++      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++          spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+               if (task_spec_ib_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ib_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+-      case SPECTRE_V2_USER_STRICT:
+-      case SPECTRE_V2_USER_STRICT_PREFERRED:
+-              return PR_SPEC_DISABLE;
+-      default:
++      } else
+               return PR_SPEC_NOT_AFFECTED;
+-      }
+ }
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+@@ -1594,7 +1607,7 @@ static char *stibp_state(void)
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return "";
+-      switch (spectre_v2_user) {
++      switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               return ", STIBP: disabled";
+       case SPECTRE_V2_USER_STRICT:
diff --git a/queue-5.6/x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch b/queue-5.6/x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch
new file mode 100644 (file)
index 0000000..b6f9637
--- /dev/null
@@ -0,0 +1,49 @@
+From 4d8df8cbb9156b0a0ab3f802b80cb5db57acc0bf Mon Sep 17 00:00:00 2001
+From: Anthony Steinhauser <asteinhauser@google.com>
+Date: Sun, 7 Jun 2020 05:44:19 -0700
+Subject: x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
+
+From: Anthony Steinhauser <asteinhauser@google.com>
+
+commit 4d8df8cbb9156b0a0ab3f802b80cb5db57acc0bf upstream.
+
+Currently, it is possible to enable indirect branch speculation even after
+it was force-disabled using the PR_SPEC_FORCE_DISABLE option. Moreover, the
+PR_GET_SPECULATION_CTRL command afterwards gives an incorrect result
+(force-disabled when it is in fact enabled). This is also inconsistent
+with STIBP and with the documentation, which clearly states that
+PR_SPEC_FORCE_DISABLE cannot be undone.
+
+Fix this by actually enforcing force-disabled indirect branch
+speculation. PR_SPEC_ENABLE called after PR_SPEC_FORCE_DISABLE now fails
+with -EPERM as described in the documentation.
+
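+Sketched as a minimal userspace check of the post-fix semantics (an
+illustration, not part of the upstream changelog; assumes the PR_SPEC_*
+constants are visible via <sys/prctl.h>):
+
+	#include <assert.h>
+	#include <errno.h>
+	#include <sys/prctl.h>
+
+	int main(void)
+	{
+		/* Irrevocably disable indirect branch speculation. */
+		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+		      PR_SPEC_FORCE_DISABLE, 0, 0);
+
+		/* With this fix, re-enabling fails with EPERM ... */
+		int ret = prctl(PR_SET_SPECULATION_CTRL,
+				PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+		assert(ret == -1 && errno == EPERM);
+
+		/* ... and the query keeps reporting force-disabled. */
+		return prctl(PR_GET_SPECULATION_CTRL,
+			     PR_SPEC_INDIRECT_BRANCH, 0, 0, 0) < 0;
+	}
+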
+Fixes: 9137bb27e60e ("x86/speculation: Add prctl() control for indirect branch speculation")
+Signed-off-by: Anthony Steinhauser <asteinhauser@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1268,11 +1268,14 @@ static int ib_prctl_set(struct task_stru
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+-               * mode.
++               * mode. It can neither be enabled if it was force-disabled
++               * by a  previous prctl call.
++
+                */
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++                  task_spec_ib_force_disable(task))
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
diff --git a/queue-5.6/x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch b/queue-5.6/x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch
new file mode 100644 (file)
index 0000000..0fa68aa
--- /dev/null
@@ -0,0 +1,96 @@
+From dbbe2ad02e9df26e372f38cc3e70dab9222c832e Mon Sep 17 00:00:00 2001
+From: Anthony Steinhauser <asteinhauser@google.com>
+Date: Sun, 5 Jan 2020 12:19:43 -0800
+Subject: x86/speculation: Prevent rogue cross-process SSBD shutdown
+
+From: Anthony Steinhauser <asteinhauser@google.com>
+
+commit dbbe2ad02e9df26e372f38cc3e70dab9222c832e upstream.
+
+On context switch, the changes of TIF_SSBD and TIF_SPEC_IB are evaluated
+to adjust the mitigations accordingly. This is optimized to avoid the
+expensive MSR write if not needed.
+
+This optimization is buggy and allows an attacker to shutdown the SSBD
+protection of a victim process.
+
+The update logic reads the cached base value for the speculation control
+MSR which has neither the SSBD nor the STIBP bit set. It then OR's the
+SSBD bit only when TIF_SSBD is different and requests the MSR update.
+
+That means if TIF_SSBD of the previous and next task are the same, then
+the base value is not updated, even if TIF_SSBD is set. The MSR write is
+not requested.
+
+Subsequently if the TIF_STIBP bit differs then the STIBP bit is updated
+in the base value and the MSR is written with a wrong SSBD value.
+
+This was introduced when the per-task/process conditional STIBP
+switching was added on top of the existing SSBD switching.
+
+It is exploitable if the attacker creates a process which enforces SSBD
+and has the opposite STIBP setting from the victim process (i.e. if the
+victim process enforces STIBP, the attacker process must not enforce it;
+if the victim process does not enforce STIBP, the attacker process must
+enforce it) and schedule it on the same core as the victim process. If
+the victim runs after the attacker, the victim becomes vulnerable to
+Spectre V4.
+
+To fix this, update the MSR value independently of the TIF_SSBD difference,
+based on the SSBD mitigation method available. This ensures that
+a subsequent STIBP-initiated MSR write has the correct state of SSBD.
+
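+For reference, the ssbd_tif_to_spec_ctrl() helper used in the hunk below
+maps the task flag onto the MSR bit; roughly (as found in
+arch/x86/include/asm/spec-ctrl.h at the time):
+
+	/* Translate TIF_SSBD into the SPEC_CTRL MSR's SSBD bit, so it
+	 * can be OR'ed into the cached base value unconditionally. */
+	static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+	{
+		BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+	}
+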
+[ tglx: Handle X86_FEATURE_VIRT_SSBD & X86_FEATURE_LS_CFG_SSBD correctly
+        and massaged changelog ]
+
+Fixes: 5bfbe3ad5840 ("x86/speculation: Prepare for per task indirect branch speculation control")
+Signed-off-by: Anthony Steinhauser <asteinhauser@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/process.c |   28 ++++++++++------------------
+ 1 file changed, 10 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -546,28 +546,20 @@ static __always_inline void __speculatio
+       lockdep_assert_irqs_disabled();
+-      /*
+-       * If TIF_SSBD is different, select the proper mitigation
+-       * method. Note that if SSBD mitigation is disabled or permanentely
+-       * enabled this branch can't be taken because nothing can set
+-       * TIF_SSBD.
+-       */
+-      if (tif_diff & _TIF_SSBD) {
+-              if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++      /* Handle change of TIF_SSBD depending on the mitigation method. */
++      if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++              if (tif_diff & _TIF_SSBD)
+                       amd_set_ssb_virt_state(tifn);
+-              } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++      } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++              if (tif_diff & _TIF_SSBD)
+                       amd_set_core_ssb_state(tifn);
+-              } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+-                         static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+-                      msr |= ssbd_tif_to_spec_ctrl(tifn);
+-                      updmsr  = true;
+-              }
++      } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++                 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++              updmsr |= !!(tif_diff & _TIF_SSBD);
++              msr |= ssbd_tif_to_spec_ctrl(tifn);
+       }
+-      /*
+-       * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+-       * otherwise avoid the MSR write.
+-       */
++      /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
+       if (IS_ENABLED(CONFIG_SMP) &&
+           static_branch_unlikely(&switch_to_cond_stibp)) {
+               updmsr |= !!(tif_diff & _TIF_SPEC_IB);
diff --git a/queue-5.6/x86_64-fix-jiffies-odr-violation.patch b/queue-5.6/x86_64-fix-jiffies-odr-violation.patch
new file mode 100644 (file)
index 0000000..a76ce36
--- /dev/null
@@ -0,0 +1,125 @@
+From d8ad6d39c35d2b44b3d48b787df7f3359381dcbf Mon Sep 17 00:00:00 2001
+From: Bob Haarman <inglorion@google.com>
+Date: Tue, 2 Jun 2020 12:30:59 -0700
+Subject: x86_64: Fix jiffies ODR violation
+
+From: Bob Haarman <inglorion@google.com>
+
+commit d8ad6d39c35d2b44b3d48b787df7f3359381dcbf upstream.
+
+'jiffies' and 'jiffies_64' are meant to alias (two different symbols that
+share the same address).  Most architectures make the symbols alias to the
+same address via a linker script assignment in their
+arch/<arch>/kernel/vmlinux.lds.S:
+
+jiffies = jiffies_64;
+
+which is effectively a definition of jiffies.
+
+jiffies and jiffies_64 are both forward declared for all architectures in
+include/linux/jiffies.h. jiffies_64 is defined in kernel/time/timer.c.
+
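+Sketched (simplified, attribute qualifiers dropped), the pieces look
+like this:
+
+	/* include/linux/jiffies.h: declarations only, no definition of
+	 * jiffies itself. */
+	extern u64 jiffies_64;
+	extern unsigned long volatile jiffies;
+
+	/* arch/<arch>/kernel/vmlinux.lds.S: the linker script assignment
+	 * that defines jiffies as an alias of jiffies_64. */
+	jiffies = jiffies_64;
+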
+x86_64 was peculiar in that it wasn't doing the above linker script
+assignment, but rather was:
+1. defining jiffies in arch/x86/kernel/time.c instead of via the linker script.
+2. overriding the symbol jiffies_64 from kernel/time/timer.c in
+arch/x86/kernel/vmlinux.lds.S via 'jiffies_64 = jiffies;'.
+
+As Fangrui notes:
+
+  In LLD, symbol assignments in linker scripts override definitions in
+  object files. GNU ld appears to have the same behavior. It would
+  probably make sense for LLD to error "duplicate symbol" but GNU ld
+  is unlikely to adopt for compatibility reasons.
+
+This results in an ODR violation (UB), which seems to have survived
+thus far. Where it becomes harmful is when:
+
+1. -fno-semantic-interposition is used:
+
+As Fangrui notes:
+
+  Clang after LLVM commit 5b22bcc2b70d
+  ("[X86][ELF] Prefer to lower MC_GlobalAddress operands to .Lfoo$local")
+  defaults to -fno-semantic-interposition-like semantics, which help
+  -fpic/-fPIC code avoid GOT/PLT when the referenced symbol is defined
+  within the same translation unit. Unlike GCC
+  -fno-semantic-interposition, Clang emits such relocations referencing
+  local symbols for non-pic code as well.
+
+This causes references to jiffies to refer to '.Ljiffies$local' when
+jiffies is defined in the same translation unit. Likewise, references to
+jiffies_64 become references to '.Ljiffies_64$local' in translation units
+that define jiffies_64.  Because these differ from the names used in the
+linker script, they will not be rewritten to alias one another.
+
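+A hedged illustration of the effect (the translation unit is invented
+for illustration; the local-symbol name is what Clang emits after the
+LLVM commit cited above):
+
+	/* jiffies.c -- with -fno-semantic-interposition semantics, Clang
+	 * emits a local alias .Ljiffies$local for the definition and
+	 * lowers same-TU references to it directly: */
+	volatile unsigned long jiffies;
+
+	unsigned long get_jiffies(void)
+	{
+		return jiffies;	/* referenced as .Ljiffies$local(%rip) */
+	}
+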
+2. Full LTO
+
+Full LTO effectively treats all source files as one translation
+unit, causing these local references to be produced everywhere.  When
+the linker processes the linker script, there are no longer any
+references to 'jiffies_64' anywhere to replace with 'jiffies'.  And
+thus '.Ljiffies$local' and '.Ljiffies_64$local' no longer alias
+at all.
+
+In the process of porting patches enabling Full LTO from arm64 to x86_64,
+spooky bugs have been observed where the kernel appeared to boot, but init
+never got scheduled.
+
+Avoid the ODR violation by matching other architectures and define jiffies
+only by linker script.  For -fno-semantic-interposition + Full LTO, there
+is no longer a global definition of jiffies for the compiler to produce a
+local symbol which the linker script won't ensure aliases to jiffies_64.
+
+Fixes: 40747ffa5aa8 ("asmlinkage: Make jiffies visible")
+Reported-by: Nathan Chancellor <natechancellor@gmail.com>
+Reported-by: Alistair Delva <adelva@google.com>
+Debugged-by: Nick Desaulniers <ndesaulniers@google.com>
+Debugged-by: Sami Tolvanen <samitolvanen@google.com>
+Suggested-by: Fangrui Song <maskray@google.com>
+Signed-off-by: Bob Haarman <inglorion@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sedat Dilek <sedat.dilek@gmail.com> # build+boot on
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://github.com/ClangBuiltLinux/linux/issues/852
+Link: https://lkml.kernel.org/r/20200602193100.229287-1-inglorion@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/time.c        |    4 ----
+ arch/x86/kernel/vmlinux.lds.S |    4 ++--
+ 2 files changed, 2 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -25,10 +25,6 @@
+ #include <asm/hpet.h>
+ #include <asm/time.h>
+-#ifdef CONFIG_X86_64
+-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+-#endif
+-
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+       unsigned long pc = instruction_pointer(regs);
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -39,13 +39,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
+ #ifdef CONFIG_X86_32
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+-jiffies = jiffies_64;
+ #else
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+-jiffies_64 = jiffies;
+ #endif
++jiffies = jiffies_64;
++
+ #if defined(CONFIG_X86_64)
+ /*
+  * On 64-bit, align RODATA to 2MB so we retain large page mappings for