git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Feb 2018 02:38:12 +0000 (03:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Feb 2018 02:38:12 +0000 (03:38 +0100)
added patches:
array_index_nospec-sanitize-speculative-array-de-references.patch
documentation-document-array_index_nospec.patch
nl80211-sanitize-array-index-in-parse_txq_params.patch
vfs-fdtable-prevent-bounds-check-bypass-via-speculative-execution.patch
x86-alternative-print-unadorned-pointers.patch
x86-asm-move-status-from-thread_struct-to-thread_info.patch
x86-bugs-drop-one-mitigation-from-dmesg.patch
x86-cpu-bugs-make-retpoline-module-warning-conditional.patch
x86-cpufeatures-clean-up-spectre-v2-related-cpuid-flags.patch
x86-cpuid-fix-up-virtual-ibrs-ibpb-stibp-feature-bits-on-intel.patch
x86-entry-64-push-extra-regs-right-away.patch
x86-entry-64-remove-the-syscall64-fast-path.patch
x86-get_user-use-pointer-masking-to-limit-speculation.patch
x86-implement-array_index_mask_nospec.patch
x86-introduce-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
x86-introduce-barrier_nospec.patch
x86-kvm-update-spectre-v1-mitigation.patch
x86-nospec-fix-header-guards-names.patch
x86-paravirt-remove-noreplace-paravirt-cmdline-option.patch
x86-pti-mark-constant-arrays-as-__initconst.patch
x86-retpoline-avoid-retpolines-for-built-in-__init-functions.patch
x86-retpoline-simplify-vmexit_fill_rsb.patch
x86-spectre-check-config_retpoline-in-command-line-parser.patch
x86-spectre-fix-spelling-mistake-vunerable-vulnerable.patch
x86-spectre-report-get_user-mitigation-for-spectre_v1.patch
x86-spectre-simplify-spectre_v2-command-line-parsing.patch
x86-speculation-add-basic-ibpb-indirect-branch-prediction-barrier-support.patch
x86-speculation-fix-typo-ibrs_att-which-should-be-ibrs_all.patch
x86-syscall-sanitize-syscall-table-de-references-under-speculation.patch
x86-uaccess-use-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
x86-usercopy-replace-open-coded-stac-clac-with-__uaccess_-begin-end.patch

41 files changed:
queue-4.9/array_index_nospec-sanitize-speculative-array-de-references.patch [new file with mode: 0644]
queue-4.9/documentation-document-array_index_nospec.patch [new file with mode: 0644]
queue-4.9/kvm-nvmx-eliminate-vmcs02-pool.patch
queue-4.9/kvm-nvmx-mark-vmcs12-pages-dirty-on-l2-exit.patch
queue-4.9/kvm-nvmx-vmx_complete_nested_posted_interrupt-can-t-fail.patch
queue-4.9/kvm-svm-allow-direct-access-to-msr_ia32_spec_ctrl.patch
queue-4.9/kvm-vmx-allow-direct-access-to-msr_ia32_spec_ctrl.patch
queue-4.9/kvm-vmx-emulate-msr_ia32_arch_capabilities.patch
queue-4.9/kvm-vmx-introduce-alloc_loaded_vmcs.patch
queue-4.9/kvm-vmx-make-msr-bitmaps-per-vcpu.patch
queue-4.9/kvm-x86-add-ibpb-support.patch
queue-4.9/nl80211-sanitize-array-index-in-parse_txq_params.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/vfs-fdtable-prevent-bounds-check-bypass-via-speculative-execution.patch [new file with mode: 0644]
queue-4.9/x86-alternative-print-unadorned-pointers.patch [new file with mode: 0644]
queue-4.9/x86-asm-move-status-from-thread_struct-to-thread_info.patch [new file with mode: 0644]
queue-4.9/x86-bugs-drop-one-mitigation-from-dmesg.patch [new file with mode: 0644]
queue-4.9/x86-cpu-bugs-make-retpoline-module-warning-conditional.patch [new file with mode: 0644]
queue-4.9/x86-cpufeatures-clean-up-spectre-v2-related-cpuid-flags.patch [new file with mode: 0644]
queue-4.9/x86-cpuid-fix-up-virtual-ibrs-ibpb-stibp-feature-bits-on-intel.patch [new file with mode: 0644]
queue-4.9/x86-entry-64-push-extra-regs-right-away.patch [new file with mode: 0644]
queue-4.9/x86-entry-64-remove-the-syscall64-fast-path.patch [new file with mode: 0644]
queue-4.9/x86-get_user-use-pointer-masking-to-limit-speculation.patch [new file with mode: 0644]
queue-4.9/x86-implement-array_index_mask_nospec.patch [new file with mode: 0644]
queue-4.9/x86-introduce-__uaccess_begin_nospec-and-uaccess_try_nospec.patch [new file with mode: 0644]
queue-4.9/x86-introduce-barrier_nospec.patch [new file with mode: 0644]
queue-4.9/x86-kvm-update-spectre-v1-mitigation.patch [new file with mode: 0644]
queue-4.9/x86-nospec-fix-header-guards-names.patch [new file with mode: 0644]
queue-4.9/x86-paravirt-remove-noreplace-paravirt-cmdline-option.patch [new file with mode: 0644]
queue-4.9/x86-pti-mark-constant-arrays-as-__initconst.patch [new file with mode: 0644]
queue-4.9/x86-retpoline-avoid-retpolines-for-built-in-__init-functions.patch [new file with mode: 0644]
queue-4.9/x86-retpoline-simplify-vmexit_fill_rsb.patch [new file with mode: 0644]
queue-4.9/x86-spectre-check-config_retpoline-in-command-line-parser.patch [new file with mode: 0644]
queue-4.9/x86-spectre-fix-spelling-mistake-vunerable-vulnerable.patch [new file with mode: 0644]
queue-4.9/x86-spectre-report-get_user-mitigation-for-spectre_v1.patch [new file with mode: 0644]
queue-4.9/x86-spectre-simplify-spectre_v2-command-line-parsing.patch [new file with mode: 0644]
queue-4.9/x86-speculation-add-basic-ibpb-indirect-branch-prediction-barrier-support.patch [new file with mode: 0644]
queue-4.9/x86-speculation-fix-typo-ibrs_att-which-should-be-ibrs_all.patch [new file with mode: 0644]
queue-4.9/x86-syscall-sanitize-syscall-table-de-references-under-speculation.patch [new file with mode: 0644]
queue-4.9/x86-uaccess-use-__uaccess_begin_nospec-and-uaccess_try_nospec.patch [new file with mode: 0644]
queue-4.9/x86-usercopy-replace-open-coded-stac-clac-with-__uaccess_-begin-end.patch [new file with mode: 0644]

diff --git a/queue-4.9/array_index_nospec-sanitize-speculative-array-de-references.patch b/queue-4.9/array_index_nospec-sanitize-speculative-array-de-references.patch
new file mode 100644 (file)
index 0000000..99fe9a2
--- /dev/null
@@ -0,0 +1,117 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:22 -0800
+Subject: array_index_nospec: Sanitize speculative array de-references
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit f3804203306e098dae9ca51540fcd5eb700d7f40)
+
+array_index_nospec() is proposed as a generic mechanism to mitigate
+against Spectre-variant-1 attacks, i.e. an attack that bypasses boundary
+checks via speculative execution. The array_index_nospec()
+implementation is expected to be safe for current generation CPUs across
+multiple architectures (ARM, x86).
+
+Based on an original implementation by Linus Torvalds, tweaked to remove
+speculative flows by Alexei Starovoitov, and tweaked again by Linus to
+introduce an x86 assembly implementation for the mask generation.
+
+Co-developed-by: Linus Torvalds <torvalds@linux-foundation.org>
+Co-developed-by: Alexei Starovoitov <ast@kernel.org>
+Suggested-by: Cyril Novikov <cnovikov@lynx.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: gregkh@linuxfoundation.org
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727414229.33451.18411580953862676575.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h |   72 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 72 insertions(+)
+ create mode 100644 include/linux/nospec.h
+
+--- /dev/null
++++ b/include/linux/nospec.h
+@@ -0,0 +1,72 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright(c) 2018 Linus Torvalds. All rights reserved.
++// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
++// Copyright(c) 2018 Intel Corporation. All rights reserved.
++
++#ifndef _LINUX_NOSPEC_H
++#define _LINUX_NOSPEC_H
++
++/**
++ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ *
++ * When @index is out of bounds (@index >= @size), the sign bit will be
++ * set.  Extend the sign bit to all bits and invert, giving a result of
++ * zero for an out of bounds index, or ~0 if within bounds [0, @size).
++ */
++#ifndef array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++                                                  unsigned long size)
++{
++      /*
++       * Warn developers about inappropriate array_index_nospec() usage.
++       *
++       * Even if the CPU speculates past the WARN_ONCE branch, the
++       * sign bit of @index is taken into account when generating the
++       * mask.
++       *
++       * This warning is compiled out when the compiler can infer that
++       * @index and @size are less than LONG_MAX.
++       */
++      if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
++                      "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
++              return 0;
++
++      /*
++       * Always calculate and emit the mask even if the compiler
++       * thinks the mask is not needed. The compiler does not take
++       * into account the value of @index under speculation.
++       */
++      OPTIMIZER_HIDE_VAR(index);
++      return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
++}
++#endif
++
++/*
++ * array_index_nospec - sanitize an array index after a bounds check
++ *
++ * For a code sequence like:
++ *
++ *     if (index < size) {
++ *         index = array_index_nospec(index, size);
++ *         val = array[index];
++ *     }
++ *
++ * ...if the CPU speculates past the bounds check then
++ * array_index_nospec() will clamp the index within the range of [0,
++ * size).
++ */
++#define array_index_nospec(index, size)                                       \
++({                                                                    \
++      typeof(index) _i = (index);                                     \
++      typeof(size) _s = (size);                                       \
++      unsigned long _mask = array_index_mask_nospec(_i, _s);          \
++                                                                      \
++      BUILD_BUG_ON(sizeof(_i) > sizeof(long));                        \
++      BUILD_BUG_ON(sizeof(_s) > sizeof(long));                        \
++                                                                      \
++      _i &= _mask;                                                    \
++      _i;                                                             \
++})
++#endif /* _LINUX_NOSPEC_H */
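
The mask expression above can be checked in isolation. The sketch below is a small userspace approximation, not part of the patch: it assumes a 64-bit long and an arithmetic right shift (as the kernel does), and mask() is simply a local copy of the expression from array_index_mask_nospec() without the WARN_ONCE() and OPTIMIZER_HIDE_VAR() details.

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(long))

	/* Local copy of the mask expression, minus the sanity checks. */
	static unsigned long mask(unsigned long index, unsigned long size)
	{
		return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
	}

	int main(void)
	{
		/* index < size: the OR of two non-negative values keeps the
		 * sign bit clear, ~ sets it, and the arithmetic shift smears
		 * it across the word -> prints ffffffffffffffff. */
		printf("%lx\n", mask(3, 16));

		/* index >= size: (size - 1 - index) wraps and sets the sign
		 * bit, ~ clears it, and the shift yields 0, so index & mask
		 * clamps any speculative access to element 0. */
		printf("%lx\n", mask(20, 16));
		return 0;
	}
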
diff --git a/queue-4.9/documentation-document-array_index_nospec.patch b/queue-4.9/documentation-document-array_index_nospec.patch
new file mode 100644 (file)
index 0000000..5bad41f
--- /dev/null
@@ -0,0 +1,125 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Mon, 29 Jan 2018 17:02:16 -0800
+Subject: Documentation: Document array_index_nospec
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+
+(cherry picked from commit f84a56f73dddaeac1dba8045b007f742f61cd2da)
+
+Document the rationale and usage of the new array_index_nospec() helper.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Cc: linux-arch@vger.kernel.org
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: gregkh@linuxfoundation.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727413645.33451.15878817161436755393.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/speculation.txt |   90 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 90 insertions(+)
+ create mode 100644 Documentation/speculation.txt
+
+--- /dev/null
++++ b/Documentation/speculation.txt
+@@ -0,0 +1,90 @@
++This document explains potential effects of speculation, and how undesirable
++effects can be mitigated portably using common APIs.
++
++===========
++Speculation
++===========
++
++To improve performance and minimize average latencies, many contemporary CPUs
++employ speculative execution techniques such as branch prediction, performing
++work which may be discarded at a later stage.
++
++Typically speculative execution cannot be observed from architectural state,
++such as the contents of registers. However, in some cases it is possible to
++observe its impact on microarchitectural state, such as the presence or
++absence of data in caches. Such state may form side-channels which can be
++observed to extract secret information.
++
++For example, in the presence of branch prediction, it is possible for bounds
++checks to be ignored by code which is speculatively executed. Consider the
++following code:
++
++      int load_array(int *array, unsigned int index)
++      {
++              if (index >= MAX_ARRAY_ELEMS)
++                      return 0;
++              else
++                      return array[index];
++      }
++
++Which, on arm64, may be compiled to an assembly sequence such as:
++
++      CMP     <index>, #MAX_ARRAY_ELEMS
++      B.LT    less
++      MOV     <returnval>, #0
++      RET
++  less:
++      LDR     <returnval>, [<array>, <index>]
++      RET
++
++It is possible that a CPU mis-predicts the conditional branch, and
++speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
++value will subsequently be discarded, but the speculated load may affect
++microarchitectural state which can be subsequently measured.
++
++More complex sequences involving multiple dependent memory accesses may
++result in sensitive information being leaked. Consider the following
++code, building on the prior example:
++
++      int load_dependent_arrays(int *arr1, int *arr2, int index)
++      {
++              int val1, val2;
++
++              val1 = load_array(arr1, index);
++              val2 = load_array(arr2, val1);
++
++              return val2;
++      }
++
++Under speculation, the first call to load_array() may return the value
++of an out-of-bounds address, while the second call will influence
++microarchitectural state dependent on this value. This may provide an
++arbitrary read primitive.
++
++====================================
++Mitigating speculation side-channels
++====================================
++
++The kernel provides a generic API to ensure that bounds checks are
++respected even under speculation. Architectures which are affected by
++speculation-based side-channels are expected to implement these
++primitives.
++
++The array_index_nospec() helper in <linux/nospec.h> can be used to
++prevent information from being leaked via side-channels.
++
++A call to array_index_nospec(index, size) returns a sanitized index
++value that is bounded to [0, size) even under cpu speculation
++conditions.
++
++This can be used to protect the earlier load_array() example:
++
++      int load_array(int *array, unsigned int index)
++      {
++              if (index >= MAX_ARRAY_ELEMS)
++                      return 0;
++              else {
++                      index = array_index_nospec(index, MAX_ARRAY_ELEMS);
++                      return array[index];
++              }
++      }
diff --git a/queue-4.9/kvm-nvmx-eliminate-vmcs02-pool.patch b/queue-4.9/kvm-nvmx-eliminate-vmcs02-pool.patch
index 8a99c2ae01cb52c6459c80317ca89d560f7b62d2..b0cb27e97a95bee65a10b457d791d196b17d660d 100644 (file)
@@ -1,14 +1,12 @@
-From de3a0021a60635de96aa92713c1a31a96747d72c Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: Jim Mattson <jmattson@google.com>
 Date: Mon, 27 Nov 2017 17:22:25 -0600
 Subject: KVM: nVMX: Eliminate vmcs02 pool
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
 
 From: Jim Mattson <jmattson@google.com>
 
-commit de3a0021a60635de96aa92713c1a31a96747d72c upstream.
+
+(cherry picked from commit de3a0021a60635de96aa92713c1a31a96747d72c)
 
 The potential performance advantages of a vmcs02 pool have never been
 realized. To simplify the code, eliminate the pool. Instead, a single
@@ -29,7 +27,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -173,7 +173,6 @@ module_param(ple_window_max, int, S_IRUG
+@@ -174,7 +174,6 @@ module_param(ple_window_max, int, S_IRUG
  extern const ulong vmx_return;
  
  #define NR_AUTOLOAD_MSRS 8
@@ -37,7 +35,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
  struct vmcs {
        u32 revision_id;
-@@ -207,7 +206,7 @@ struct shared_msr_entry {
+@@ -208,7 +207,7 @@ struct shared_msr_entry {
   * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
   * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
   * More than one of these structures may exist, if L1 runs multiple L2 guests.
@@ -46,7 +44,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
   * underlying hardware which will be used to run L2.
   * This structure is packed to ensure that its layout is identical across
   * machines (necessary for live migration).
-@@ -386,13 +385,6 @@ struct __packed vmcs12 {
+@@ -387,13 +386,6 @@ struct __packed vmcs12 {
   */
  #define VMCS12_SIZE 0x1000
  
@@ -60,7 +58,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  /*
   * The nested_vmx structure is part of vcpu_vmx, and holds information we need
   * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
-@@ -419,15 +411,15 @@ struct nested_vmx {
+@@ -420,15 +412,15 @@ struct nested_vmx {
         */
        bool sync_shadow_vmcs;
  
@@ -81,7 +79,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
         */
        struct page *apic_access_page;
        struct page *virtual_apic_page;
-@@ -6684,94 +6676,6 @@ static int handle_monitor(struct kvm_vcp
+@@ -6682,94 +6674,6 @@ static int handle_monitor(struct kvm_vcp
  }
  
  /*
@@ -176,7 +174,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
   * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
   * set the success or error code of an emulated VMX instruction, as specified
   * by Vol 2B, VMX Instruction Reference, "Conventions".
-@@ -7084,6 +6988,12 @@ static int handle_vmon(struct kvm_vcpu *
+@@ -7082,6 +6986,12 @@ static int handle_vmon(struct kvm_vcpu *
                return 1;
        }
  
@@ -189,7 +187,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        if (cpu_has_vmx_msr_bitmap()) {
                vmx->nested.msr_bitmap =
                                (unsigned long *)__get_free_page(GFP_KERNEL);
-@@ -7106,9 +7016,6 @@ static int handle_vmon(struct kvm_vcpu *
+@@ -7104,9 +7014,6 @@ static int handle_vmon(struct kvm_vcpu *
                vmx->vmcs01.shadow_vmcs = shadow_vmcs;
        }
  
@@ -199,7 +197,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED);
        vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
-@@ -7126,6 +7033,9 @@ out_cached_vmcs12:
+@@ -7124,6 +7031,9 @@ out_cached_vmcs12:
        free_page((unsigned long)vmx->nested.msr_bitmap);
  
  out_msr_bitmap:
@@ -209,7 +207,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        return -ENOMEM;
  }
  
-@@ -7211,7 +7121,7 @@ static void free_nested(struct vcpu_vmx
+@@ -7209,7 +7119,7 @@ static void free_nested(struct vcpu_vmx
                vmx->vmcs01.shadow_vmcs = NULL;
        }
        kfree(vmx->nested.cached_vmcs12);
@@ -218,7 +216,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        if (vmx->nested.apic_access_page) {
                nested_release_page(vmx->nested.apic_access_page);
                vmx->nested.apic_access_page = NULL;
-@@ -7227,7 +7137,7 @@ static void free_nested(struct vcpu_vmx
+@@ -7225,7 +7135,7 @@ static void free_nested(struct vcpu_vmx
                vmx->nested.pi_desc = NULL;
        }
  
@@ -227,7 +225,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  /* Emulate the VMXOFF instruction */
-@@ -7261,8 +7171,6 @@ static int handle_vmclear(struct kvm_vcp
+@@ -7259,8 +7169,6 @@ static int handle_vmclear(struct kvm_vcp
                        vmptr + offsetof(struct vmcs12, launch_state),
                        &zero, sizeof(zero));
  
@@ -236,7 +234,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
        return 1;
-@@ -8051,10 +7959,11 @@ static bool nested_vmx_exit_handled(stru
+@@ -8049,10 +7957,11 @@ static bool nested_vmx_exit_handled(stru
  
        /*
         * The host physical addresses of some pages of guest memory
@@ -252,7 +250,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
         *
         * Mark them dirty on every exit from L2 to prevent them from
         * getting out of sync with dirty tracking.
-@@ -10223,7 +10132,6 @@ static int nested_vmx_run(struct kvm_vcp
+@@ -10221,7 +10130,6 @@ static int nested_vmx_run(struct kvm_vcp
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int cpu;
@@ -260,7 +258,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        bool ia32e;
        u32 msr_entry_idx;
  
-@@ -10363,17 +10271,13 @@ static int nested_vmx_run(struct kvm_vcp
+@@ -10361,17 +10269,13 @@ static int nested_vmx_run(struct kvm_vcp
         * the nested entry.
         */
  
@@ -279,7 +277,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        vmx_vcpu_put(vcpu);
        vmx_vcpu_load(vcpu, cpu);
        vcpu->cpu = cpu;
-@@ -10888,10 +10792,6 @@ static void nested_vmx_vmexit(struct kvm
+@@ -10886,10 +10790,6 @@ static void nested_vmx_vmexit(struct kvm
        vm_exit_controls_reset_shadow(vmx);
        vmx_segment_cache_clear(vmx);
  
diff --git a/queue-4.9/kvm-nvmx-mark-vmcs12-pages-dirty-on-l2-exit.patch b/queue-4.9/kvm-nvmx-mark-vmcs12-pages-dirty-on-l2-exit.patch
index 9361ea07fba5139e077ca7c5bb1c532aebf3424d..cfb55bb78755e444964d0df8a05c8541b6e2ca25 100644 (file)
@@ -1,14 +1,12 @@
-From c9f04407f2e0b3fc9ff7913c65fcfcb0a4b61570 Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: David Matlack <dmatlack@google.com>
 Date: Tue, 1 Aug 2017 14:00:40 -0700
 Subject: KVM: nVMX: mark vmcs12 pages dirty on L2 exit
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
 
 From: David Matlack <dmatlack@google.com>
 
-commit c9f04407f2e0b3fc9ff7913c65fcfcb0a4b61570 upstream.
+
+(cherry picked from commit c9f04407f2e0b3fc9ff7913c65fcfcb0a4b61570)
 
 The host physical addresses of L1's Virtual APIC Page and Posted
 Interrupt descriptor are loaded into the VMCS02. The CPU may write
@@ -31,7 +29,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -4738,6 +4738,28 @@ static bool vmx_get_enable_apicv(void)
+@@ -4736,6 +4736,28 @@ static bool vmx_get_enable_apicv(void)
        return enable_apicv;
  }
  
@@ -60,7 +58,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-@@ -4745,18 +4767,15 @@ static void vmx_complete_nested_posted_i
+@@ -4743,18 +4765,15 @@ static void vmx_complete_nested_posted_i
        void *vapic_page;
        u16 status;
  
@@ -86,7 +84,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
                vapic_page = kmap(vmx->nested.virtual_apic_page);
                if (!vapic_page) {
                        WARN_ON(1);
-@@ -4772,6 +4791,8 @@ static void vmx_complete_nested_posted_i
+@@ -4770,6 +4789,8 @@ static void vmx_complete_nested_posted_i
                        vmcs_write16(GUEST_INTR_STATUS, status);
                }
        }
@@ -95,7 +93,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
-@@ -8028,6 +8049,18 @@ static bool nested_vmx_exit_handled(stru
+@@ -8026,6 +8047,18 @@ static bool nested_vmx_exit_handled(stru
                                vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
                                KVM_ISA_VMX);
  
diff --git a/queue-4.9/kvm-nvmx-vmx_complete_nested_posted_interrupt-can-t-fail.patch b/queue-4.9/kvm-nvmx-vmx_complete_nested_posted_interrupt-can-t-fail.patch
index 9c5498f1bc92a8cd035676032c9e933abc9c2919..d9bb84b01b95b46649ee0cbaf653b51ab0666ede 100644 (file)
@@ -1,11 +1,12 @@
-From 6342c50ad12e8ce0736e722184a7dbdea4a3477f Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: David Hildenbrand <david@redhat.com>
 Date: Wed, 25 Jan 2017 11:58:58 +0100
 Subject: KVM: nVMX: vmx_complete_nested_posted_interrupt() can't fail
 
 From: David Hildenbrand <david@redhat.com>
 
-commit 6342c50ad12e8ce0736e722184a7dbdea4a3477f upstream.
+
+(cherry picked from commit 6342c50ad12e8ce0736e722184a7dbdea4a3477f)
 
 vmx_complete_nested_posted_interrupt() can't fail, let's turn it into
 a void function.
@@ -20,7 +21,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -4738,7 +4738,7 @@ static bool vmx_get_enable_apicv(void)
+@@ -4736,7 +4736,7 @@ static bool vmx_get_enable_apicv(void)
        return enable_apicv;
  }
  
@@ -29,7 +30,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int max_irr;
-@@ -4749,13 +4749,13 @@ static int vmx_complete_nested_posted_in
+@@ -4747,13 +4747,13 @@ static int vmx_complete_nested_posted_in
            vmx->nested.pi_pending) {
                vmx->nested.pi_pending = false;
                if (!pi_test_and_clear_on(vmx->nested.pi_desc))
@@ -45,7 +46,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
                vapic_page = kmap(vmx->nested.virtual_apic_page);
                if (!vapic_page) {
-@@ -4772,7 +4772,6 @@ static int vmx_complete_nested_posted_in
+@@ -4770,7 +4770,6 @@ static int vmx_complete_nested_posted_in
                        vmcs_write16(GUEST_INTR_STATUS, status);
                }
        }
@@ -53,7 +54,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
-@@ -10493,7 +10492,8 @@ static int vmx_check_nested_events(struc
+@@ -10491,7 +10490,8 @@ static int vmx_check_nested_events(struc
                return 0;
        }
  
diff --git a/queue-4.9/kvm-svm-allow-direct-access-to-msr_ia32_spec_ctrl.patch b/queue-4.9/kvm-svm-allow-direct-access-to-msr_ia32_spec_ctrl.patch
index b81a525cb8bab93053ddf82dd18c0c72480dfb55..3b2438fc8193ba31e313f29b51ba81363e2dbf04 100644 (file)
@@ -1,11 +1,12 @@
-From b2ac58f90540e39324e7a29a7ad471407ae0bf48 Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: KarimAllah Ahmed <karahmed@amazon.de>
 Date: Sat, 3 Feb 2018 15:56:23 +0100
 Subject: KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL
 
 From: KarimAllah Ahmed <karahmed@amazon.de>
 
-commit b2ac58f90540e39324e7a29a7ad471407ae0bf48 upstream.
+
+(cherry picked from commit b2ac58f90540e39324e7a29a7ad471407ae0bf48)
 
 [ Based on a patch from Paolo Bonzini <pbonzini@redhat.com> ]
 
diff --git a/queue-4.9/kvm-vmx-allow-direct-access-to-msr_ia32_spec_ctrl.patch b/queue-4.9/kvm-vmx-allow-direct-access-to-msr_ia32_spec_ctrl.patch
index b94e36d76a1b77b6c51e79df6808c62a01587ed3..12f32771f1c8d96ec2207de863b8a0f854ef1a6b 100644 (file)
@@ -1,11 +1,12 @@
-From d28b387fb74da95d69d2615732f50cceb38e9a4d Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: KarimAllah Ahmed <karahmed@amazon.de>
 Date: Thu, 1 Feb 2018 22:59:45 +0100
 Subject: KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL
 
 From: KarimAllah Ahmed <karahmed@amazon.de>
 
-commit d28b387fb74da95d69d2615732f50cceb38e9a4d upstream.
+
+(cherry picked from commit d28b387fb74da95d69d2615732f50cceb38e9a4d)
 
 [ Based on a patch from Ashok Raj <ashok.raj@intel.com> ]
 
@@ -108,7 +109,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        struct kvm_cpuid_entry2 *best;
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -551,6 +551,7 @@ struct vcpu_vmx {
+@@ -552,6 +552,7 @@ struct vcpu_vmx {
  #endif
  
        u64                   arch_capabilities;
@@ -116,7 +117,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        u32 vm_entry_controls_shadow;
        u32 vm_exit_controls_shadow;
-@@ -1854,6 +1855,29 @@ static void update_exception_bitmap(stru
+@@ -1852,6 +1853,29 @@ static void update_exception_bitmap(stru
  }
  
  /*
@@ -146,7 +147,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
   * Check if MSR is intercepted for L01 MSR bitmap.
   */
  static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
-@@ -2983,6 +3007,13 @@ static int vmx_get_msr(struct kvm_vcpu *
+@@ -2981,6 +3005,13 @@ static int vmx_get_msr(struct kvm_vcpu *
        case MSR_IA32_TSC:
                msr_info->data = guest_read_tsc(vcpu);
                break;
@@ -160,7 +161,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        case MSR_IA32_ARCH_CAPABILITIES:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has_arch_capabilities(vcpu))
-@@ -3093,6 +3124,36 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3091,6 +3122,36 @@ static int vmx_set_msr(struct kvm_vcpu *
        case MSR_IA32_TSC:
                kvm_write_tsc(vcpu, msr_info);
                break;
@@ -197,7 +198,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        case MSR_IA32_PRED_CMD:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has_ibpb(vcpu))
-@@ -5245,6 +5306,7 @@ static void vmx_vcpu_reset(struct kvm_vc
+@@ -5243,6 +5304,7 @@ static void vmx_vcpu_reset(struct kvm_vc
        u64 cr0;
  
        vmx->rmode.vm86_active = 0;
@@ -205,7 +206,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        vmx->soft_vnmi_blocked = 0;
  
-@@ -8830,6 +8892,15 @@ static void __noclone vmx_vcpu_run(struc
+@@ -8828,6 +8890,15 @@ static void __noclone vmx_vcpu_run(struc
  
        vmx_arm_hv_timer(vcpu);
  
@@ -221,7 +222,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
                /* Store host registers */
-@@ -8948,6 +9019,27 @@ static void __noclone vmx_vcpu_run(struc
+@@ -8946,6 +9017,27 @@ static void __noclone vmx_vcpu_run(struc
  #endif
              );
  
@@ -249,7 +250,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
  
-@@ -9507,7 +9599,7 @@ static inline bool nested_vmx_merge_msr_
+@@ -9505,7 +9597,7 @@ static inline bool nested_vmx_merge_msr_
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
        /*
@@ -258,7 +259,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
         *
         * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
         *    ensures that we do not accidentally generate an L02 MSR bitmap
-@@ -9520,9 +9612,10 @@ static inline bool nested_vmx_merge_msr_
+@@ -9518,9 +9610,10 @@ static inline bool nested_vmx_merge_msr_
         *    the MSR.
         */
        bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
@@ -270,7 +271,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
                return false;
  
        page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-@@ -9561,6 +9654,12 @@ static inline bool nested_vmx_merge_msr_
+@@ -9559,6 +9652,12 @@ static inline bool nested_vmx_merge_msr_
                }
        }
  
diff --git a/queue-4.9/kvm-vmx-emulate-msr_ia32_arch_capabilities.patch b/queue-4.9/kvm-vmx-emulate-msr_ia32_arch_capabilities.patch
index 4b8d14c941ed51b94714990e2f41f124cb99fa71..4e57abb2ce5b8c3e3ebc22fe37675585ebe37b17 100644 (file)
@@ -1,11 +1,12 @@
-From 28c1c9fabf48d6ad596273a11c46e0d0da3e14cd Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: KarimAllah Ahmed <karahmed@amazon.de>
 Date: Thu, 1 Feb 2018 22:59:44 +0100
 Subject: KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES
 
 From: KarimAllah Ahmed <karahmed@amazon.de>
 
-commit 28c1c9fabf48d6ad596273a11c46e0d0da3e14cd upstream.
+
+(cherry picked from commit 28c1c9fabf48d6ad596273a11c46e0d0da3e14cd)
 
 Intel processors use MSR_IA32_ARCH_CAPABILITIES MSR to indicate RDCL_NO
 (bit 0) and IBRS_ALL (bit 1). This is a read-only MSR. By default the
@@ -92,7 +93,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
   * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -550,6 +550,8 @@ struct vcpu_vmx {
+@@ -551,6 +551,8 @@ struct vcpu_vmx {
        u64                   msr_guest_kernel_gs_base;
  #endif
  
@@ -101,7 +102,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        u32 vm_entry_controls_shadow;
        u32 vm_exit_controls_shadow;
        /*
-@@ -2981,6 +2983,12 @@ static int vmx_get_msr(struct kvm_vcpu *
+@@ -2979,6 +2981,12 @@ static int vmx_get_msr(struct kvm_vcpu *
        case MSR_IA32_TSC:
                msr_info->data = guest_read_tsc(vcpu);
                break;
@@ -114,7 +115,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        case MSR_IA32_SYSENTER_CS:
                msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
-@@ -3112,6 +3120,11 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3110,6 +3118,11 @@ static int vmx_set_msr(struct kvm_vcpu *
                vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
                                              MSR_TYPE_W);
                break;
@@ -126,7 +127,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        case MSR_IA32_CR_PAT:
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                        if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-@@ -5202,6 +5215,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
+@@ -5200,6 +5213,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
                ++vmx->nmsrs;
        }
  
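
The two capability bits named in the commit message are consumed by testing the MSR contents after a read. A rough kernel-context sketch follows; the helper name and messages are illustrative, the MSR number and bit positions follow the commit message and Intel's documentation, and the feature flag is one of those introduced earlier in this series.

	#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
	#define ARCH_CAP_RDCL_NO		(1 << 0)	/* not vulnerable to Meltdown */
	#define ARCH_CAP_IBRS_ALL		(1 << 1)	/* enhanced IBRS supported */

	static void report_arch_capabilities(void)
	{
		u64 ia32_cap = 0;

		/* Only read the MSR when CPUID says it is implemented. */
		if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
			rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

		if (ia32_cap & ARCH_CAP_RDCL_NO)
			pr_info("IA32_ARCH_CAPABILITIES: RDCL_NO set\n");
		if (ia32_cap & ARCH_CAP_IBRS_ALL)
			pr_info("IA32_ARCH_CAPABILITIES: IBRS_ALL set\n");
	}
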
diff --git a/queue-4.9/kvm-vmx-introduce-alloc_loaded_vmcs.patch b/queue-4.9/kvm-vmx-introduce-alloc_loaded_vmcs.patch
index 32d35f88e11a18ec58f1bf24302ed2ca3c97c6d6..5a011cec496ceb3cfb5fee31a3d1d5138fdb7d2e 100644 (file)
@@ -1,11 +1,12 @@
-From f21f165ef922c2146cc5bdc620f542953c41714b Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: Paolo Bonzini <pbonzini@redhat.com>
 Date: Thu, 11 Jan 2018 12:16:15 +0100
 Subject: KVM: VMX: introduce alloc_loaded_vmcs
 
 From: Paolo Bonzini <pbonzini@redhat.com>
 
-commit f21f165ef922c2146cc5bdc620f542953c41714b upstream.
+
+(cherry picked from commit f21f165ef922c2146cc5bdc620f542953c41714b)
 
 Group together the calls to alloc_vmcs and loaded_vmcs_init.  Soon we'll also
 allocate an MSR bitmap there.
@@ -20,7 +21,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -3524,11 +3524,6 @@ static struct vmcs *alloc_vmcs_cpu(int c
+@@ -3522,11 +3522,6 @@ static struct vmcs *alloc_vmcs_cpu(int c
        return vmcs;
  }
  
@@ -32,7 +33,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static void free_vmcs(struct vmcs *vmcs)
  {
        free_pages((unsigned long)vmcs, vmcs_config.order);
-@@ -3547,6 +3542,22 @@ static void free_loaded_vmcs(struct load
+@@ -3545,6 +3540,22 @@ static void free_loaded_vmcs(struct load
        WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
  }
  
@@ -55,7 +56,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static void free_kvm_area(void)
  {
        int cpu;
-@@ -6949,6 +6960,7 @@ static int handle_vmon(struct kvm_vcpu *
+@@ -6947,6 +6958,7 @@ static int handle_vmon(struct kvm_vcpu *
        struct vmcs *shadow_vmcs;
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -63,7 +64,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        /* The Intel VMX Instruction Reference lists a bunch of bits that
         * are prerequisite to running VMXON, most notably cr4.VMXE must be
-@@ -6988,11 +7000,9 @@ static int handle_vmon(struct kvm_vcpu *
+@@ -6986,11 +6998,9 @@ static int handle_vmon(struct kvm_vcpu *
                return 1;
        }
  
@@ -77,7 +78,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        if (cpu_has_vmx_msr_bitmap()) {
                vmx->nested.msr_bitmap =
-@@ -9113,17 +9123,15 @@ static struct kvm_vcpu *vmx_create_vcpu(
+@@ -9111,17 +9121,15 @@ static struct kvm_vcpu *vmx_create_vcpu(
        if (!vmx->guest_msrs)
                goto free_pml;
  
diff --git a/queue-4.9/kvm-vmx-make-msr-bitmaps-per-vcpu.patch b/queue-4.9/kvm-vmx-make-msr-bitmaps-per-vcpu.patch
index a831850727e66f051dc3ebc4aece0e10a745af6b..311396544dc220a43cc06ddf6979e6b2fa01cffe 100644 (file)
@@ -1,11 +1,12 @@
-From 904e14fb7cb96401a7dc803ca2863fd5ba32ffe6 Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: Paolo Bonzini <pbonzini@redhat.com>
 Date: Tue, 16 Jan 2018 16:51:18 +0100
 Subject: KVM: VMX: make MSR bitmaps per-VCPU
 
 From: Paolo Bonzini <pbonzini@redhat.com>
 
-commit 904e14fb7cb96401a7dc803ca2863fd5ba32ffe6 upstream.
+
+(cherry picked from commit 904e14fb7cb96401a7dc803ca2863fd5ba32ffe6)
 
 Place the MSR bitmap in struct loaded_vmcs, and update it in place
 every time the x2apic or APICv state can change.  This is rare and
@@ -26,7 +27,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -109,6 +109,14 @@ static u64 __read_mostly host_xss;
+@@ -110,6 +110,14 @@ static u64 __read_mostly host_xss;
  static bool __read_mostly enable_pml = 1;
  module_param_named(pml, enable_pml, bool, S_IRUGO);
  
@@ -41,7 +42,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
  
  /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
-@@ -190,6 +198,7 @@ struct loaded_vmcs {
+@@ -191,6 +199,7 @@ struct loaded_vmcs {
        struct vmcs *shadow_vmcs;
        int cpu;
        int launched;
@@ -49,7 +50,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        struct list_head loaded_vmcss_on_cpu_link;
  };
  
-@@ -428,8 +437,6 @@ struct nested_vmx {
+@@ -429,8 +438,6 @@ struct nested_vmx {
        bool pi_pending;
        u16 posted_intr_nv;
  
@@ -58,7 +59,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        struct hrtimer preemption_timer;
        bool preemption_timer_expired;
  
-@@ -530,6 +537,7 @@ struct vcpu_vmx {
+@@ -531,6 +538,7 @@ struct vcpu_vmx {
        unsigned long         host_rsp;
        u8                    fail;
        bool                  nmi_known_unmasked;
@@ -66,7 +67,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;
-@@ -904,6 +912,7 @@ static u32 vmx_segment_access_rights(str
+@@ -902,6 +910,7 @@ static u32 vmx_segment_access_rights(str
  static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
  static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
  static int alloc_identity_pagetable(struct kvm *kvm);
@@ -74,7 +75,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-@@ -923,12 +932,6 @@ static DEFINE_PER_CPU(spinlock_t, blocke
+@@ -921,12 +930,6 @@ static DEFINE_PER_CPU(spinlock_t, blocke
  
  static unsigned long *vmx_io_bitmap_a;
  static unsigned long *vmx_io_bitmap_b;
@@ -87,7 +88,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static unsigned long *vmx_vmread_bitmap;
  static unsigned long *vmx_vmwrite_bitmap;
  
-@@ -2522,36 +2525,6 @@ static void move_msr_up(struct vcpu_vmx
+@@ -2520,36 +2523,6 @@ static void move_msr_up(struct vcpu_vmx
        vmx->guest_msrs[from] = tmp;
  }
  
@@ -124,7 +125,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  /*
   * Set up the vmcs to automatically save and restore system
   * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
-@@ -2592,7 +2565,7 @@ static void setup_msrs(struct vcpu_vmx *
+@@ -2590,7 +2563,7 @@ static void setup_msrs(struct vcpu_vmx *
        vmx->save_nmsrs = save_nmsrs;
  
        if (cpu_has_vmx_msr_bitmap())
@@ -133,7 +134,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  /*
-@@ -3539,6 +3512,8 @@ static void free_loaded_vmcs(struct load
+@@ -3537,6 +3510,8 @@ static void free_loaded_vmcs(struct load
        loaded_vmcs_clear(loaded_vmcs);
        free_vmcs(loaded_vmcs->vmcs);
        loaded_vmcs->vmcs = NULL;
@@ -142,7 +143,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
  }
  
-@@ -3555,7 +3530,18 @@ static int alloc_loaded_vmcs(struct load
+@@ -3553,7 +3528,18 @@ static int alloc_loaded_vmcs(struct load
  
        loaded_vmcs->shadow_vmcs = NULL;
        loaded_vmcs_init(loaded_vmcs);
@@ -161,7 +162,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static void free_kvm_area(void)
-@@ -4564,10 +4550,8 @@ static void free_vpid(int vpid)
+@@ -4562,10 +4548,8 @@ static void free_vpid(int vpid)
        spin_unlock(&vmx_vpid_lock);
  }
  
@@ -174,7 +175,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  {
        int f = sizeof(unsigned long);
  
-@@ -4601,8 +4585,8 @@ static void __vmx_disable_intercept_for_
+@@ -4599,8 +4583,8 @@ static void __vmx_disable_intercept_for_
        }
  }
  
@@ -185,7 +186,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  {
        int f = sizeof(unsigned long);
  
-@@ -4636,6 +4620,15 @@ static void __vmx_enable_intercept_for_m
+@@ -4634,6 +4618,15 @@ static void __vmx_enable_intercept_for_m
        }
  }
  
@@ -201,7 +202,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  /*
   * If a msr is allowed by L0, we should check whether it is allowed by L1.
   * The corresponding bit will be cleared unless both of L0 and L1 allow it.
-@@ -4682,58 +4675,68 @@ static void nested_vmx_disable_intercept
+@@ -4680,58 +4673,68 @@ static void nested_vmx_disable_intercept
        }
  }
  
@@ -313,7 +314,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static bool vmx_get_enable_apicv(void)
-@@ -4982,7 +4985,7 @@ static void vmx_refresh_apicv_exec_ctrl(
+@@ -4980,7 +4983,7 @@ static void vmx_refresh_apicv_exec_ctrl(
        }
  
        if (cpu_has_vmx_msr_bitmap())
@@ -322,7 +323,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static u32 vmx_exec_control(struct vcpu_vmx *vmx)
-@@ -5071,7 +5074,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+@@ -5069,7 +5072,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
                vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
        }
        if (cpu_has_vmx_msr_bitmap())
@@ -331,7 +332,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
  
-@@ -6402,7 +6405,7 @@ static void wakeup_handler(void)
+@@ -6400,7 +6403,7 @@ static void wakeup_handler(void)
  
  static __init int hardware_setup(void)
  {
@@ -340,7 +341,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        rdmsrl_safe(MSR_EFER, &host_efer);
  
-@@ -6417,41 +6420,13 @@ static __init int hardware_setup(void)
+@@ -6415,41 +6418,13 @@ static __init int hardware_setup(void)
        if (!vmx_io_bitmap_b)
                goto out;
  
@@ -384,7 +385,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-@@ -6460,12 +6435,9 @@ static __init int hardware_setup(void)
+@@ -6458,12 +6433,9 @@ static __init int hardware_setup(void)
  
        memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
  
@@ -398,7 +399,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        }
  
        if (boot_cpu_has(X86_FEATURE_NX))
-@@ -6522,47 +6494,8 @@ static __init int hardware_setup(void)
+@@ -6520,47 +6492,8 @@ static __init int hardware_setup(void)
                kvm_tsc_scaling_ratio_frac_bits = 48;
        }
  
@@ -446,7 +447,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        if (enable_ept) {
                kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
                        (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
-@@ -6608,22 +6541,10 @@ static __init int hardware_setup(void)
+@@ -6606,22 +6539,10 @@ static __init int hardware_setup(void)
  
        return alloc_kvm_area();
  
@@ -471,7 +472,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  out1:
        free_page((unsigned long)vmx_io_bitmap_b);
  out:
-@@ -6634,12 +6555,6 @@ out:
+@@ -6632,12 +6553,6 @@ out:
  
  static __exit void hardware_unsetup(void)
  {
@@ -484,7 +485,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        free_page((unsigned long)vmx_io_bitmap_b);
        free_page((unsigned long)vmx_io_bitmap_a);
        free_page((unsigned long)vmx_vmwrite_bitmap);
-@@ -7004,13 +6919,6 @@ static int handle_vmon(struct kvm_vcpu *
+@@ -7002,13 +6917,6 @@ static int handle_vmon(struct kvm_vcpu *
        if (r < 0)
                goto out_vmcs02;
  
@@ -498,7 +499,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
-@@ -7040,9 +6948,6 @@ out_shadow_vmcs:
+@@ -7038,9 +6946,6 @@ out_shadow_vmcs:
        kfree(vmx->nested.cached_vmcs12);
  
  out_cached_vmcs12:
@@ -508,7 +509,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        free_loaded_vmcs(&vmx->nested.vmcs02);
  
  out_vmcs02:
-@@ -7121,10 +7026,6 @@ static void free_nested(struct vcpu_vmx
+@@ -7119,10 +7024,6 @@ static void free_nested(struct vcpu_vmx
        vmx->nested.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        nested_release_vmcs12(vmx);
@@ -519,7 +520,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        if (enable_shadow_vmcs) {
                vmcs_clear(vmx->vmcs01.shadow_vmcs);
                free_vmcs(vmx->vmcs01.shadow_vmcs);
-@@ -8471,7 +8372,7 @@ static void vmx_set_virtual_x2apic_mode(
+@@ -8469,7 +8370,7 @@ static void vmx_set_virtual_x2apic_mode(
        }
        vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
  
@@ -528,7 +529,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
-@@ -9091,6 +8992,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
+@@ -9089,6 +8990,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
  {
        int err;
        struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -536,7 +537,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        int cpu;
  
        if (!vmx)
-@@ -9131,6 +9033,15 @@ static struct kvm_vcpu *vmx_create_vcpu(
+@@ -9129,6 +9031,15 @@ static struct kvm_vcpu *vmx_create_vcpu(
        if (err < 0)
                goto free_msrs;
  
@@ -552,7 +553,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        vmx->loaded_vmcs = &vmx->vmcs01;
        cpu = get_cpu();
        vmx_vcpu_load(&vmx->vcpu, cpu);
-@@ -9525,7 +9436,7 @@ static inline bool nested_vmx_merge_msr_
+@@ -9523,7 +9434,7 @@ static inline bool nested_vmx_merge_msr_
        int msr;
        struct page *page;
        unsigned long *msr_bitmap_l1;
@@ -561,7 +562,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        /* This shortcut is ok because we support only x2APIC MSRs so far. */
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
-@@ -10045,6 +9956,9 @@ static void prepare_vmcs02(struct kvm_vc
+@@ -10043,6 +9954,9 @@ static void prepare_vmcs02(struct kvm_vc
        if (kvm_has_tsc_control)
                decache_tsc_multiplier(vmx);
  
@@ -571,7 +572,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        if (enable_vpid) {
                /*
                 * There is no direct mapping between vpid02 and vpid12, the
-@@ -10749,7 +10663,7 @@ static void load_vmcs12_host_state(struc
+@@ -10747,7 +10661,7 @@ static void load_vmcs12_host_state(struc
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
  
        if (cpu_has_vmx_msr_bitmap())
diff --git a/queue-4.9/kvm-x86-add-ibpb-support.patch b/queue-4.9/kvm-x86-add-ibpb-support.patch
index c893472bd1aa86c825127e977daf46d9ad998f17..2fba2d6f3c827771a40c3755ae1a19efb7178ef9 100644 (file)
@@ -1,11 +1,12 @@
-From 15d45071523d89b3fb7372e2135fbd72f6af9506 Mon Sep 17 00:00:00 2001
+From foo@baz Thu Feb  8 03:36:51 CET 2018
 From: Ashok Raj <ashok.raj@intel.com>
 Date: Thu, 1 Feb 2018 22:59:43 +0100
 Subject: KVM/x86: Add IBPB support
 
 From: Ashok Raj <ashok.raj@intel.com>
 
-commit 15d45071523d89b3fb7372e2135fbd72f6af9506 upstream.
+
+(cherry picked from commit 15d45071523d89b3fb7372e2135fbd72f6af9506)
 
 The Indirect Branch Predictor Barrier (IBPB) is an indirect branch
 control mechanism. It keeps earlier branches from influencing
@@ -212,7 +213,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
                break;
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -549,6 +549,7 @@ struct vcpu_vmx {
+@@ -550,6 +550,7 @@ struct vcpu_vmx {
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
  #endif
@@ -220,7 +221,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        u32 vm_entry_controls_shadow;
        u32 vm_exit_controls_shadow;
        /*
-@@ -913,6 +914,8 @@ static void copy_vmcs12_to_shadow(struct
+@@ -911,6 +912,8 @@ static void copy_vmcs12_to_shadow(struct
  static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
  static int alloc_identity_pagetable(struct kvm *kvm);
  static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
@@ -229,7 +230,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-@@ -1848,6 +1851,29 @@ static void update_exception_bitmap(stru
+@@ -1846,6 +1849,29 @@ static void update_exception_bitmap(stru
        vmcs_write32(EXCEPTION_BITMAP, eb);
  }
  
@@ -259,7 +260,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
                unsigned long entry, unsigned long exit)
  {
-@@ -2257,6 +2283,7 @@ static void vmx_vcpu_load(struct kvm_vcp
+@@ -2255,6 +2281,7 @@ static void vmx_vcpu_load(struct kvm_vcp
        if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
                per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
                vmcs_load(vmx->loaded_vmcs->vmcs);
@@ -267,7 +268,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        }
  
        if (!already_loaded) {
-@@ -3058,6 +3085,33 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3056,6 +3083,33 @@ static int vmx_set_msr(struct kvm_vcpu *
        case MSR_IA32_TSC:
                kvm_write_tsc(vcpu, msr_info);
                break;
@@ -301,7 +302,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        case MSR_IA32_CR_PAT:
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                        if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-@@ -9437,9 +9491,23 @@ static inline bool nested_vmx_merge_msr_
+@@ -9435,9 +9489,23 @@ static inline bool nested_vmx_merge_msr_
        struct page *page;
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
@@ -327,7 +328,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
                return false;
  
        page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-@@ -9477,6 +9545,13 @@ static inline bool nested_vmx_merge_msr_
+@@ -9475,6 +9543,13 @@ static inline bool nested_vmx_merge_msr_
                                MSR_TYPE_W);
                }
        }
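
The barrier the commit message describes boils down to a write of the IBPB command bit to the IA32_PRED_CMD MSR. A minimal sketch of that write follows; the MSR number and bit value follow Intel's documentation, while the helper name and the feature-flag guard shown here are illustrative of how the series avoids the write on CPUs without the new microcode.

	#define MSR_IA32_PRED_CMD	0x00000049
	#define PRED_CMD_IBPB		(1 << 0)

	static inline void issue_ibpb(void)
	{
		/* Skip the (expensive) write on CPUs without IBPB support. */
		if (!boot_cpu_has(X86_FEATURE_IBPB))
			return;

		/* Invalidate indirect branch prediction state on this CPU. */
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}
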
diff --git a/queue-4.9/nl80211-sanitize-array-index-in-parse_txq_params.patch b/queue-4.9/nl80211-sanitize-array-index-in-parse_txq_params.patch
new file mode 100644 (file)
index 0000000..5de0872
--- /dev/null
@@ -0,0 +1,72 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:03:15 -0800
+Subject: nl80211: Sanitize array index in parse_txq_params
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit 259d8c1e984318497c84eef547bbb6b1d9f4eb05)
+
+Wireless drivers rely on parse_txq_params to validate that txq_params->ac
+is less than NL80211_NUM_ACS by the time the low-level driver's ->conf_tx()
+handler is called. Use a new helper, array_index_nospec(), to sanitize
+txq_params->ac with respect to speculation. I.e. ensure that any
+speculation into ->conf_tx() handlers is done with a value of
+txq_params->ac that is within the bounds of [0, NL80211_NUM_ACS).
+
+Reported-by: Christian Lamparter <chunkeey@gmail.com>
+Reported-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Johannes Berg <johannes@sipsolutions.net>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: linux-wireless@vger.kernel.org
+Cc: torvalds@linux-foundation.org
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727419584.33451.7700736761686184303.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/nl80211.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -16,6 +16,7 @@
+ #include <linux/nl80211.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/etherdevice.h>
+ #include <net/net_namespace.h>
+ #include <net/genetlink.h>
+@@ -2014,20 +2015,22 @@ static const struct nla_policy txq_param
+ static int parse_txq_params(struct nlattr *tb[],
+                           struct ieee80211_txq_params *txq_params)
+ {
++      u8 ac;
++
+       if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
+           !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
+           !tb[NL80211_TXQ_ATTR_AIFS])
+               return -EINVAL;
+-      txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
++      ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
+       txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
+       txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
+       txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
+       txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
+-      if (txq_params->ac >= NL80211_NUM_ACS)
++      if (ac >= NL80211_NUM_ACS)
+               return -EINVAL;
+-
++      txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS);
+       return 0;
+ }
diff --git a/queue-4.9/series b/queue-4.9/series
index d487eb871be2d4080793394caac84207f72ee319..15db3734b2ba17fd077271b36a389e2966f79a26 100644 (file)
@@ -42,6 +42,37 @@ x86-cpufeatures-add-amd-feature-bits-for-speculation-control.patch
 x86-msr-add-definitions-for-new-speculation-control-msrs.patch
 x86-pti-do-not-enable-pti-on-cpus-which-are-not-vulnerable-to-meltdown.patch
 x86-cpufeature-blacklist-spec_ctrl-pred_cmd-on-early-spectre-v2-microcodes.patch
+x86-speculation-add-basic-ibpb-indirect-branch-prediction-barrier-support.patch
+x86-alternative-print-unadorned-pointers.patch
+x86-nospec-fix-header-guards-names.patch
+x86-bugs-drop-one-mitigation-from-dmesg.patch
+x86-cpu-bugs-make-retpoline-module-warning-conditional.patch
+x86-cpufeatures-clean-up-spectre-v2-related-cpuid-flags.patch
+x86-retpoline-simplify-vmexit_fill_rsb.patch
+x86-spectre-check-config_retpoline-in-command-line-parser.patch
+x86-entry-64-remove-the-syscall64-fast-path.patch
+x86-entry-64-push-extra-regs-right-away.patch
+x86-asm-move-status-from-thread_struct-to-thread_info.patch
+documentation-document-array_index_nospec.patch
+array_index_nospec-sanitize-speculative-array-de-references.patch
+x86-implement-array_index_mask_nospec.patch
+x86-introduce-barrier_nospec.patch
+x86-introduce-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
+x86-usercopy-replace-open-coded-stac-clac-with-__uaccess_-begin-end.patch
+x86-uaccess-use-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
+x86-get_user-use-pointer-masking-to-limit-speculation.patch
+x86-syscall-sanitize-syscall-table-de-references-under-speculation.patch
+vfs-fdtable-prevent-bounds-check-bypass-via-speculative-execution.patch
+nl80211-sanitize-array-index-in-parse_txq_params.patch
+x86-spectre-report-get_user-mitigation-for-spectre_v1.patch
+x86-spectre-fix-spelling-mistake-vunerable-vulnerable.patch
+x86-cpuid-fix-up-virtual-ibrs-ibpb-stibp-feature-bits-on-intel.patch
+x86-paravirt-remove-noreplace-paravirt-cmdline-option.patch
+x86-kvm-update-spectre-v1-mitigation.patch
+x86-retpoline-avoid-retpolines-for-built-in-__init-functions.patch
+x86-spectre-simplify-spectre_v2-command-line-parsing.patch
+x86-pti-mark-constant-arrays-as-__initconst.patch
+x86-speculation-fix-typo-ibrs_att-which-should-be-ibrs_all.patch
 kvm-nvmx-vmx_complete_nested_posted_interrupt-can-t-fail.patch
 kvm-nvmx-mark-vmcs12-pages-dirty-on-l2-exit.patch
 kvm-nvmx-eliminate-vmcs02-pool.patch
diff --git a/queue-4.9/vfs-fdtable-prevent-bounds-check-bypass-via-speculative-execution.patch b/queue-4.9/vfs-fdtable-prevent-bounds-check-bypass-via-speculative-execution.patch
new file mode 100644 (file)
index 0000000..6ed1c28
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:03:05 -0800
+Subject: vfs, fdtable: Prevent bounds-check bypass via speculative execution
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit 56c30ba7b348b90484969054d561f711ba196507)
+
+'fd' is a user controlled value that is used as a data dependency to
+read from the 'fdt->fd' array.  In order to avoid potential leaks of
+kernel memory values, block speculative execution of the instruction
+stream that could issue reads based on an invalid 'file *' returned from
+__fcheck_files.
+
+Co-developed-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727418500.33451.17392199002892248656.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/fdtable.h |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -9,6 +9,7 @@
+ #include <linux/compiler.h>
+ #include <linux/spinlock.h>
+ #include <linux/rcupdate.h>
++#include <linux/nospec.h>
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
+@@ -81,8 +82,10 @@ static inline struct file *__fcheck_file
+ {
+       struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+-      if (fd < fdt->max_fds)
++      if (fd < fdt->max_fds) {
++              fd = array_index_nospec(fd, fdt->max_fds);
+               return rcu_dereference_raw(fdt->fd[fd]);
++      }
+       return NULL;
+ }
diff --git a/queue-4.9/x86-alternative-print-unadorned-pointers.patch b/queue-4.9/x86-alternative-print-unadorned-pointers.patch
new file mode 100644 (file)
index 0000000..79ec761
--- /dev/null
@@ -0,0 +1,92 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 26 Jan 2018 13:11:36 +0100
+Subject: x86/alternative: Print unadorned pointers
+
+From: Borislav Petkov <bp@suse.de>
+
+(cherry picked from commit 0e6c16c652cadaffd25a6bb326ec10da5bcec6b4)
+
+After commit ad67b74d2469 ("printk: hash addresses printed with %p")
+pointers are being hashed when printed. However, this makes the alternative
+debug output completely useless. Switch to %px in order to see the
+unadorned kernel pointers.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-2-bp@alien8.de
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *
+       tgt_rip  = next_rip + o_dspl;
+       n_dspl = tgt_rip - orig_insn;
+-      DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
++      DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
+       if (tgt_rip - orig_insn >= 0) {
+               if (n_dspl - 2 <= 127)
+@@ -352,7 +352,7 @@ static void __init_or_module optimize_no
+       sync_core();
+       local_irq_restore(flags);
+-      DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
++      DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
+                  instr, a->instrlen - a->padlen, a->padlen);
+ }
+@@ -370,7 +370,7 @@ void __init_or_module apply_alternatives
+       u8 *instr, *replacement;
+       u8 insnbuf[MAX_PATCH_LEN];
+-      DPRINTK("alt table %p -> %p", start, end);
++      DPRINTK("alt table %px, -> %px", start, end);
+       /*
+        * The scan order should be from start to end. A later scanned
+        * alternative code can overwrite previously scanned alternative code.
+@@ -394,14 +394,14 @@ void __init_or_module apply_alternatives
+                       continue;
+               }
+-              DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
++              DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
+                       a->cpuid >> 5,
+                       a->cpuid & 0x1f,
+                       instr, a->instrlen,
+                       replacement, a->replacementlen, a->padlen);
+-              DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
+-              DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
++              DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
++              DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+               memcpy(insnbuf, replacement, a->replacementlen);
+               insnbuf_sz = a->replacementlen;
+@@ -422,7 +422,7 @@ void __init_or_module apply_alternatives
+                                a->instrlen - a->replacementlen);
+                       insnbuf_sz += a->instrlen - a->replacementlen;
+               }
+-              DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
++              DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
+               text_poke_early(instr, insnbuf, insnbuf_sz);
+       }
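
The change above only swaps format specifiers: since commit ad67b74d2469 plain %p output is hashed, so the alternatives debug output no longer shows real addresses. The difference, purely for illustration (not from the patch):

    pr_debug("%p\n",  ptr);         /* prints a hashed, per-boot token */
    pr_debug("%px\n", ptr);         /* prints the raw kernel address */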
diff --git a/queue-4.9/x86-asm-move-status-from-thread_struct-to-thread_info.patch b/queue-4.9/x86-asm-move-status-from-thread_struct-to-thread_info.patch
new file mode 100644 (file)
index 0000000..b22fc0a
--- /dev/null
@@ -0,0 +1,172 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 28 Jan 2018 10:38:50 -0800
+Subject: x86/asm: Move 'status' from thread_struct to thread_info
+
+From: Andy Lutomirski <luto@kernel.org>
+
+
+(cherry picked from commit 37a8f7c38339b22b69876d6f5a0ab851565284e3)
+
+The TS_COMPAT bit is very hot and is accessed from code paths that mostly
+also touch thread_info::flags.  Move it into struct thread_info to improve
+cache locality.
+
+The only reason it was in thread_struct is that there was a brief period
+during which arch-specific fields were not allowed in struct thread_info.
+
+Linus suggested further changing:
+
+  ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
+
+to:
+
+  if (unlikely(ti->status & (TS_COMPAT|TS_I386_REGS_POKED)))
+          ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
+
+on the theory that frequently dirtying the cacheline even in pure 64-bit
+code that never needs to modify status hurts performance.  That could be a
+reasonable followup patch, but I suspect it matters less on top of this
+patch.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Kernel Hardening <kernel-hardening@lists.openwall.com>
+Link: https://lkml.kernel.org/r/03148bcc1b217100e6e8ecf6a5468c45cf4304b6.1517164461.git.luto@kernel.org
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/common.c            |    4 ++--
+ arch/x86/include/asm/processor.h   |    2 --
+ arch/x86/include/asm/syscall.h     |    6 +++---
+ arch/x86/include/asm/thread_info.h |    3 ++-
+ arch/x86/kernel/process_64.c       |    4 ++--
+ arch/x86/kernel/ptrace.c           |    2 +-
+ arch/x86/kernel/signal.c           |    2 +-
+ 7 files changed, 11 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -201,7 +201,7 @@ __visible inline void prepare_exit_to_us
+        * special case only applies after poking regs and before the
+        * very next return to user mode.
+        */
+-      current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
++      ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
+ #endif
+       user_enter_irqoff();
+@@ -299,7 +299,7 @@ static __always_inline void do_syscall_3
+       unsigned int nr = (unsigned int)regs->orig_ax;
+ #ifdef CONFIG_IA32_EMULATION
+-      current->thread.status |= TS_COMPAT;
++      ti->status |= TS_COMPAT;
+ #endif
+       if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -391,8 +391,6 @@ struct thread_struct {
+       unsigned short          gsindex;
+ #endif
+-      u32                     status;         /* thread synchronous flags */
+-
+ #ifdef CONFIG_X86_64
+       unsigned long           fsbase;
+       unsigned long           gsbase;
+--- a/arch/x86/include/asm/syscall.h
++++ b/arch/x86/include/asm/syscall.h
+@@ -60,7 +60,7 @@ static inline long syscall_get_error(str
+        * TS_COMPAT is set for 32-bit syscall entries and then
+        * remains set until we return to user mode.
+        */
+-      if (task->thread.status & (TS_COMPAT|TS_I386_REGS_POKED))
++      if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED))
+               /*
+                * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+                * and will match correctly in comparisons.
+@@ -116,7 +116,7 @@ static inline void syscall_get_arguments
+                                        unsigned long *args)
+ {
+ # ifdef CONFIG_IA32_EMULATION
+-      if (task->thread.status & TS_COMPAT)
++      if (task->thread_info.status & TS_COMPAT)
+               switch (i) {
+               case 0:
+                       if (!n--) break;
+@@ -177,7 +177,7 @@ static inline void syscall_set_arguments
+                                        const unsigned long *args)
+ {
+ # ifdef CONFIG_IA32_EMULATION
+-      if (task->thread.status & TS_COMPAT)
++      if (task->thread_info.status & TS_COMPAT)
+               switch (i) {
+               case 0:
+                       if (!n--) break;
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -54,6 +54,7 @@ struct task_struct;
+ struct thread_info {
+       unsigned long           flags;          /* low level flags */
++      u32                     status;         /* thread synchronous flags */
+ };
+ #define INIT_THREAD_INFO(tsk)                 \
+@@ -213,7 +214,7 @@ static inline int arch_within_stack_fram
+ #define in_ia32_syscall() true
+ #else
+ #define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
+-                         current->thread.status & TS_COMPAT)
++                         current_thread_info()->status & TS_COMPAT)
+ #endif
+ /*
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -538,7 +538,7 @@ void set_personality_ia32(bool x32)
+               current->personality &= ~READ_IMPLIES_EXEC;
+               /* in_compat_syscall() uses the presence of the x32
+                  syscall bit flag to determine compat status */
+-              current->thread.status &= ~TS_COMPAT;
++              current_thread_info()->status &= ~TS_COMPAT;
+       } else {
+               set_thread_flag(TIF_IA32);
+               clear_thread_flag(TIF_X32);
+@@ -546,7 +546,7 @@ void set_personality_ia32(bool x32)
+                       current->mm->context.ia32_compat = TIF_IA32;
+               current->personality |= force_personality32;
+               /* Prepare the first "return" to user space */
+-              current->thread.status |= TS_COMPAT;
++              current_thread_info()->status |= TS_COMPAT;
+       }
+ }
+ EXPORT_SYMBOL_GPL(set_personality_ia32);
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -934,7 +934,7 @@ static int putreg32(struct task_struct *
+                */
+               regs->orig_ax = value;
+               if (syscall_get_nr(child, regs) >= 0)
+-                      child->thread.status |= TS_I386_REGS_POKED;
++                      child->thread_info.status |= TS_I386_REGS_POKED;
+               break;
+       case offsetof(struct user32, regs.eflags):
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -785,7 +785,7 @@ static inline unsigned long get_nr_resta
+        * than the tracee.
+        */
+ #ifdef CONFIG_IA32_EMULATION
+-      if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED))
++      if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+               return __NR_ia32_restart_syscall;
+ #endif
+ #ifdef CONFIG_X86_X32_ABI
diff --git a/queue-4.9/x86-bugs-drop-one-mitigation-from-dmesg.patch b/queue-4.9/x86-bugs-drop-one-mitigation-from-dmesg.patch
new file mode 100644 (file)
index 0000000..d9380c1
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 26 Jan 2018 13:11:39 +0100
+Subject: x86/bugs: Drop one "mitigation" from dmesg
+
+From: Borislav Petkov <bp@suse.de>
+
+(cherry picked from commit 55fa19d3e51f33d9cd4056d25836d93abf9438db)
+
+Make
+
+[    0.031118] Spectre V2 mitigation: Mitigation: Full generic retpoline
+
+into
+
+[    0.031118] Spectre V2: Mitigation: Full generic retpoline
+
+to reduce the mitigation mitigations strings.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-5-bp@alien8.de
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -90,7 +90,7 @@ static const char *spectre_v2_strings[]
+ };
+ #undef pr_fmt
+-#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
++#define pr_fmt(fmt)     "Spectre V2 : " fmt
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+ static bool spectre_v2_bad_module;
diff --git a/queue-4.9/x86-cpu-bugs-make-retpoline-module-warning-conditional.patch b/queue-4.9/x86-cpu-bugs-make-retpoline-module-warning-conditional.patch
new file mode 100644 (file)
index 0000000..f8a1204
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 27 Jan 2018 15:45:14 +0100
+Subject: x86/cpu/bugs: Make retpoline module warning conditional
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+(cherry picked from commit e383095c7fe8d218e00ec0f83e4b95ed4e627b02)
+
+If sysfs is disabled and RETPOLINE not defined:
+
+arch/x86/kernel/cpu/bugs.c:97:13: warning: ‘spectre_v2_bad_module’ defined but not used
+[-Wunused-variable]
+ static bool spectre_v2_bad_module;
+
+Hide it.
+
+Fixes: caf7501a1b4e ("module/retpoline: Warn about missing retpoline in module")
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -93,9 +93,10 @@ static const char *spectre_v2_strings[]
+ #define pr_fmt(fmt)     "Spectre V2 : " fmt
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+-static bool spectre_v2_bad_module;
+ #ifdef RETPOLINE
++static bool spectre_v2_bad_module;
++
+ bool retpoline_module_ok(bool has_retpoline)
+ {
+       if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+@@ -105,6 +106,13 @@ bool retpoline_module_ok(bool has_retpol
+       spectre_v2_bad_module = true;
+       return false;
+ }
++
++static inline const char *spectre_v2_module_string(void)
++{
++      return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
++}
++#else
++static inline const char *spectre_v2_module_string(void) { return ""; }
+ #endif
+ static void __init spec2_print_if_insecure(const char *reason)
+@@ -299,7 +307,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+               return sprintf(buf, "Not affected\n");
+       return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+-                     boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "",
+-                     spectre_v2_bad_module ? " - vulnerable module loaded" : "");
++                     boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "",
++                     spectre_v2_module_string());
+ }
+ #endif
diff --git a/queue-4.9/x86-cpufeatures-clean-up-spectre-v2-related-cpuid-flags.patch b/queue-4.9/x86-cpufeatures-clean-up-spectre-v2-related-cpuid-flags.patch
new file mode 100644 (file)
index 0000000..96900c0
--- /dev/null
@@ -0,0 +1,171 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 27 Jan 2018 16:24:32 +0000
+Subject: x86/cpufeatures: Clean up Spectre v2 related CPUID flags
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+(cherry picked from commit 2961298efe1ea1b6fc0d7ee8b76018fa6c0bcef2)
+
+We want to expose the hardware features simply in /proc/cpuinfo as "ibrs",
+"ibpb" and "stibp". Since AMD has separate CPUID bits for those, use them
+as the user-visible bits.
+
+When the Intel SPEC_CTRL bit is set which indicates both IBRS and IBPB
+capability, set those (AMD) bits accordingly. Likewise if the Intel STIBP
+bit is set, set the AMD STIBP that's used for the generic hardware
+capability.
+
+Hide the rest from /proc/cpuinfo by putting "" in the comments. Including
+RETPOLINE and RETPOLINE_AMD which shouldn't be visible there. There are
+patches to make the sysfs vulnerabilities information non-readable by
+non-root, and the same should apply to all information about which
+mitigations are actually in use. Those *shouldn't* appear in /proc/cpuinfo.
+
+The feature bit for whether IBPB is actually used, which is needed for
+ALTERNATIVEs, is renamed to X86_FEATURE_USE_IBPB.
+
+Originally-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1517070274-12128-2-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h   |   18 +++++++++---------
+ arch/x86/include/asm/nospec-branch.h |    2 +-
+ arch/x86/kernel/cpu/bugs.c           |    7 +++----
+ arch/x86/kernel/cpu/intel.c          |   31 +++++++++++++++++++++----------
+ 4 files changed, 34 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -194,15 +194,15 @@
+ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+ #define X86_FEATURE_KAISER    ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+-#define X86_FEATURE_IBPB              ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/
++#define X86_FEATURE_USE_IBPB  ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -260,9 +260,9 @@
+ /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+ #define X86_FEATURE_CLZERO    (13*32+0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF    (13*32+1) /* Instructions Retired Count */
+-#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */
+-#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */
+-#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */
++#define X86_FEATURE_IBPB      (13*32+12) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_IBRS      (13*32+14) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_STIBP     (13*32+15) /* Single Thread Indirect Branch Predictors */
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+ #define X86_FEATURE_DTHERM    (14*32+ 0) /* Digital Thermal Sensor */
+@@ -301,8 +301,8 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW     (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS     (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+-#define X86_FEATURE_SPEC_CTRL         (18*32+26) /* Speculation Control (IBRS + IBPB) */
+-#define X86_FEATURE_STIBP             (18*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_SPEC_CTRL         (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
++#define X86_FEATURE_INTEL_STIBP               (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ /*
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -225,7 +225,7 @@ static inline void indirect_branch_predi
+                                "movl %[val], %%eax\n\t"
+                                "movl $0, %%edx\n\t"
+                                "wrmsr",
+-                               X86_FEATURE_IBPB)
++                               X86_FEATURE_USE_IBPB)
+                    : : [msr] "i" (MSR_IA32_PRED_CMD),
+                        [val] "i" (PRED_CMD_IBPB)
+                    : "eax", "ecx", "edx", "memory");
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -272,9 +272,8 @@ retpoline_auto:
+       }
+       /* Initialize Indirect Branch Prediction Barrier if supported */
+-      if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+-          boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) {
+-              setup_force_cpu_cap(X86_FEATURE_IBPB);
++      if (boot_cpu_has(X86_FEATURE_IBPB)) {
++              setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+               pr_info("Enabling Indirect Branch Prediction Barrier\n");
+       }
+ }
+@@ -307,7 +306,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+               return sprintf(buf, "Not affected\n");
+       return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+-                     boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "",
++                     boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+                      spectre_v2_module_string());
+ }
+ #endif
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -140,17 +140,28 @@ static void early_init_intel(struct cpui
+               rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
+       }
+-      if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
+-           cpu_has(c, X86_FEATURE_STIBP) ||
+-           cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) ||
+-           cpu_has(c, X86_FEATURE_AMD_PRED_CMD) ||
+-           cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) {
+-              pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n");
+-              clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
++      /*
++       * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
++       * and they also have a different bit for STIBP support. Also,
++       * a hypervisor might have set the individual AMD bits even on
++       * Intel CPUs, for finer-grained selection of what's available.
++       */
++      if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
++              set_cpu_cap(c, X86_FEATURE_IBRS);
++              set_cpu_cap(c, X86_FEATURE_IBPB);
++      }
++      if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
++              set_cpu_cap(c, X86_FEATURE_STIBP);
++
++      /* Now if any of them are set, check the blacklist and clear the lot */
++      if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
++           cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
++              pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
++              clear_cpu_cap(c, X86_FEATURE_IBRS);
++              clear_cpu_cap(c, X86_FEATURE_IBPB);
+               clear_cpu_cap(c, X86_FEATURE_STIBP);
+-              clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL);
+-              clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD);
+-              clear_cpu_cap(c, X86_FEATURE_AMD_STIBP);
++              clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
++              clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
+       }
+       /*
diff --git a/queue-4.9/x86-cpuid-fix-up-virtual-ibrs-ibpb-stibp-feature-bits-on-intel.patch b/queue-4.9/x86-cpuid-fix-up-virtual-ibrs-ibpb-stibp-feature-bits-on-intel.patch
new file mode 100644 (file)
index 0000000..7e942e5
--- /dev/null
@@ -0,0 +1,122 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 30 Jan 2018 14:30:23 +0000
+Subject: x86/cpuid: Fix up "virtual" IBRS/IBPB/STIBP feature bits on Intel
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+
+(cherry picked from commit 7fcae1118f5fd44a862aa5c3525248e35ee67c3b)
+
+Despite the fact that all the other code there seems to be doing it, just
+using set_cpu_cap() in early_init_intel() doesn't actually work.
+
+For CPUs with PKU support, setup_pku() calls get_cpu_cap() after
+c->c_init() has set those feature bits. That resets those bits back to what
+was queried from the hardware.
+
+Turning the bits off for bad microcode is easy to fix. That can just use
+setup_clear_cpu_cap() to force them off for all CPUs.
+
+I was less keen on forcing the feature bits *on* that way, just in case
+of inconsistencies. I appreciate that the kernel is going to get this
+utterly wrong if CPU features are not consistent, because it has already
+applied alternatives by the time secondary CPUs are brought up.
+
+But at least if setup_force_cpu_cap() isn't being used, we might have a
+chance of *detecting* the lack of the corresponding bit and either
+panicking or refusing to bring the offending CPU online.
+
+So ensure that the appropriate feature bits are set within get_cpu_cap()
+regardless of how many extra times it's called.
+
+Fixes: 2961298e ("x86/cpufeatures: Clean up Spectre v2 related CPUID flags")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: karahmed@amazon.de
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Link: https://lkml.kernel.org/r/1517322623-15261-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c |   21 +++++++++++++++++++++
+ arch/x86/kernel/cpu/intel.c  |   27 ++++++++-------------------
+ 2 files changed, 29 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -718,6 +718,26 @@ static void apply_forced_caps(struct cpu
+       }
+ }
++static void init_speculation_control(struct cpuinfo_x86 *c)
++{
++      /*
++       * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
++       * and they also have a different bit for STIBP support. Also,
++       * a hypervisor might have set the individual AMD bits even on
++       * Intel CPUs, for finer-grained selection of what's available.
++       *
++       * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
++       * features, which are visible in /proc/cpuinfo and used by the
++       * kernel. So set those accordingly from the Intel bits.
++       */
++      if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
++              set_cpu_cap(c, X86_FEATURE_IBRS);
++              set_cpu_cap(c, X86_FEATURE_IBPB);
++      }
++      if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
++              set_cpu_cap(c, X86_FEATURE_STIBP);
++}
++
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+ {
+       u32 eax, ebx, ecx, edx;
+@@ -812,6 +832,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+               c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+       init_scattered_cpuid_features(c);
++      init_speculation_control(c);
+ }
+ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -140,28 +140,17 @@ static void early_init_intel(struct cpui
+               rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
+       }
+-      /*
+-       * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+-       * and they also have a different bit for STIBP support. Also,
+-       * a hypervisor might have set the individual AMD bits even on
+-       * Intel CPUs, for finer-grained selection of what's available.
+-       */
+-      if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+-              set_cpu_cap(c, X86_FEATURE_IBRS);
+-              set_cpu_cap(c, X86_FEATURE_IBPB);
+-      }
+-      if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+-              set_cpu_cap(c, X86_FEATURE_STIBP);
+-
+       /* Now if any of them are set, check the blacklist and clear the lot */
+-      if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
++      if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
++           cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
++           cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+            cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+               pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+-              clear_cpu_cap(c, X86_FEATURE_IBRS);
+-              clear_cpu_cap(c, X86_FEATURE_IBPB);
+-              clear_cpu_cap(c, X86_FEATURE_STIBP);
+-              clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
+-              clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
++              setup_clear_cpu_cap(X86_FEATURE_IBRS);
++              setup_clear_cpu_cap(X86_FEATURE_IBPB);
++              setup_clear_cpu_cap(X86_FEATURE_STIBP);
++              setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
++              setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+       }
+       /*
diff --git a/queue-4.9/x86-entry-64-push-extra-regs-right-away.patch b/queue-4.9/x86-entry-64-push-extra-regs-right-away.patch
new file mode 100644 (file)
index 0000000..e80f751
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 28 Jan 2018 10:38:49 -0800
+Subject: x86/entry/64: Push extra regs right away
+
+From: Andy Lutomirski <luto@kernel.org>
+
+(cherry picked from commit d1f7732009e0549eedf8ea1db948dc37be77fd46)
+
+With the fast path removed there is no point in splitting the push of the
+normal and the extra register set. Just push the extra regs right away.
+
+[ tglx: Split out from 'x86/entry/64: Remove the SYSCALL64 fast path' ]
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Kernel Hardening <kernel-hardening@lists.openwall.com>
+Link: https://lkml.kernel.org/r/462dff8d4d64dfbfc851fbf3130641809d980ecd.1517164461.git.luto@kernel.org
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_64.S |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -177,10 +177,14 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+       pushq   %r9                             /* pt_regs->r9 */
+       pushq   %r10                            /* pt_regs->r10 */
+       pushq   %r11                            /* pt_regs->r11 */
+-      sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
++      pushq   %rbx                            /* pt_regs->rbx */
++      pushq   %rbp                            /* pt_regs->rbp */
++      pushq   %r12                            /* pt_regs->r12 */
++      pushq   %r13                            /* pt_regs->r13 */
++      pushq   %r14                            /* pt_regs->r14 */
++      pushq   %r15                            /* pt_regs->r15 */
+       /* IRQs are off. */
+-      SAVE_EXTRA_REGS
+       movq    %rsp, %rdi
+       call    do_syscall_64           /* returns with IRQs disabled */
diff --git a/queue-4.9/x86-entry-64-remove-the-syscall64-fast-path.patch b/queue-4.9/x86-entry-64-remove-the-syscall64-fast-path.patch
new file mode 100644 (file)
index 0000000..daa62bd
--- /dev/null
@@ -0,0 +1,202 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 28 Jan 2018 10:38:49 -0800
+Subject: x86/entry/64: Remove the SYSCALL64 fast path
+
+From: Andy Lutomirski <luto@kernel.org>
+
+(cherry picked from commit 21d375b6b34ff511a507de27bf316b3dde6938d9)
+
+The SYSCALL64 fast path was a nice, if small, optimization back in the good
+old days when syscalls were actually reasonably fast.  Now there is PTI to
+slow everything down, and indirect branches are verboten, making everything
+messier.  The retpoline code in the fast path is particularly nasty.
+
+Just get rid of the fast path. The slow path is barely slower.
+
+[ tglx: Split out the 'push all extra regs' part ]
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Kernel Hardening <kernel-hardening@lists.openwall.com>
+Link: https://lkml.kernel.org/r/462dff8d4d64dfbfc851fbf3130641809d980ecd.1517164461.git.luto@kernel.org
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_64.S   |  123 --------------------------------------------
+ arch/x86/entry/syscall_64.c |    7 --
+ 2 files changed, 3 insertions(+), 127 deletions(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -179,94 +179,11 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+       pushq   %r11                            /* pt_regs->r11 */
+       sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
+-      /*
+-       * If we need to do entry work or if we guess we'll need to do
+-       * exit work, go straight to the slow path.
+-       */
+-      movq    PER_CPU_VAR(current_task), %r11
+-      testl   $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
+-      jnz     entry_SYSCALL64_slow_path
+-
+-entry_SYSCALL_64_fastpath:
+-      /*
+-       * Easy case: enable interrupts and issue the syscall.  If the syscall
+-       * needs pt_regs, we'll call a stub that disables interrupts again
+-       * and jumps to the slow path.
+-       */
+-      TRACE_IRQS_ON
+-      ENABLE_INTERRUPTS(CLBR_NONE)
+-#if __SYSCALL_MASK == ~0
+-      cmpq    $__NR_syscall_max, %rax
+-#else
+-      andl    $__SYSCALL_MASK, %eax
+-      cmpl    $__NR_syscall_max, %eax
+-#endif
+-      ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
+-      movq    %r10, %rcx
+-
+-      /*
+-       * This call instruction is handled specially in stub_ptregs_64.
+-       * It might end up jumping to the slow path.  If it jumps, RAX
+-       * and all argument registers are clobbered.
+-       */
+-#ifdef CONFIG_RETPOLINE
+-      movq    sys_call_table(, %rax, 8), %rax
+-      call    __x86_indirect_thunk_rax
+-#else
+-      call    *sys_call_table(, %rax, 8)
+-#endif
+-.Lentry_SYSCALL_64_after_fastpath_call:
+-
+-      movq    %rax, RAX(%rsp)
+-1:
+-
+-      /*
+-       * If we get here, then we know that pt_regs is clean for SYSRET64.
+-       * If we see that no exit work is required (which we are required
+-       * to check with IRQs off), then we can go straight to SYSRET64.
+-       */
+-      DISABLE_INTERRUPTS(CLBR_NONE)
+-      TRACE_IRQS_OFF
+-      movq    PER_CPU_VAR(current_task), %r11
+-      testl   $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
+-      jnz     1f
+-
+-      LOCKDEP_SYS_EXIT
+-      TRACE_IRQS_ON           /* user mode is traced as IRQs on */
+-      movq    RIP(%rsp), %rcx
+-      movq    EFLAGS(%rsp), %r11
+-      RESTORE_C_REGS_EXCEPT_RCX_R11
+-      /*
+-       * This opens a window where we have a user CR3, but are
+-       * running in the kernel.  This makes using the CS
+-       * register useless for telling whether or not we need to
+-       * switch CR3 in NMIs.  Normal interrupts are OK because
+-       * they are off here.
+-       */
+-      SWITCH_USER_CR3
+-      movq    RSP(%rsp), %rsp
+-      USERGS_SYSRET64
+-
+-1:
+-      /*
+-       * The fast path looked good when we started, but something changed
+-       * along the way and we need to switch to the slow path.  Calling
+-       * raise(3) will trigger this, for example.  IRQs are off.
+-       */
+-      TRACE_IRQS_ON
+-      ENABLE_INTERRUPTS(CLBR_NONE)
+-      SAVE_EXTRA_REGS
+-      movq    %rsp, %rdi
+-      call    syscall_return_slowpath /* returns with IRQs disabled */
+-      jmp     return_from_SYSCALL_64
+-
+-entry_SYSCALL64_slow_path:
+       /* IRQs are off. */
+       SAVE_EXTRA_REGS
+       movq    %rsp, %rdi
+       call    do_syscall_64           /* returns with IRQs disabled */
+-return_from_SYSCALL_64:
+       RESTORE_EXTRA_REGS
+       TRACE_IRQS_IRETQ                /* we're about to change IF */
+@@ -339,6 +256,7 @@ return_from_SYSCALL_64:
+ syscall_return_via_sysret:
+       /* rcx and r11 are already restored (see code above) */
+       RESTORE_C_REGS_EXCEPT_RCX_R11
++
+       /*
+        * This opens a window where we have a user CR3, but are
+        * running in the kernel.  This makes using the CS
+@@ -363,45 +281,6 @@ opportunistic_sysret_failed:
+       jmp     restore_c_regs_and_iret
+ END(entry_SYSCALL_64)
+-ENTRY(stub_ptregs_64)
+-      /*
+-       * Syscalls marked as needing ptregs land here.
+-       * If we are on the fast path, we need to save the extra regs,
+-       * which we achieve by trying again on the slow path.  If we are on
+-       * the slow path, the extra regs are already saved.
+-       *
+-       * RAX stores a pointer to the C function implementing the syscall.
+-       * IRQs are on.
+-       */
+-      cmpq    $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+-      jne     1f
+-
+-      /*
+-       * Called from fast path -- disable IRQs again, pop return address
+-       * and jump to slow path
+-       */
+-      DISABLE_INTERRUPTS(CLBR_NONE)
+-      TRACE_IRQS_OFF
+-      popq    %rax
+-      jmp     entry_SYSCALL64_slow_path
+-
+-1:
+-      JMP_NOSPEC %rax                         /* Called from C */
+-END(stub_ptregs_64)
+-
+-.macro ptregs_stub func
+-ENTRY(ptregs_\func)
+-      leaq    \func(%rip), %rax
+-      jmp     stub_ptregs_64
+-END(ptregs_\func)
+-.endm
+-
+-/* Instantiate ptregs_stub for each ptregs-using syscall */
+-#define __SYSCALL_64_QUAL_(sym)
+-#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+-#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+-#include <asm/syscalls_64.h>
+-
+ /*
+  * %rdi: prev task
+  * %rsi: next task
+--- a/arch/x86/entry/syscall_64.c
++++ b/arch/x86/entry/syscall_64.c
+@@ -6,14 +6,11 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/syscall.h>
+-#define __SYSCALL_64_QUAL_(sym) sym
+-#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
+-
+-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
++#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+ #include <asm/syscalls_64.h>
+ #undef __SYSCALL_64
+-#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
++#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
+ extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
diff --git a/queue-4.9/x86-get_user-use-pointer-masking-to-limit-speculation.patch b/queue-4.9/x86-get_user-use-pointer-masking-to-limit-speculation.patch
new file mode 100644 (file)
index 0000000..e81910d
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:54 -0800
+Subject: x86/get_user: Use pointer masking to limit speculation
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit c7f631cb07e7da06ac1d231ca178452339e32a94)
+
+Quoting Linus:
+
+    I do think that it would be a good idea to very expressly document
+    the fact that it's not that the user access itself is unsafe. I do
+    agree that things like "get_user()" want to be protected, but not
+    because of any direct bugs or problems with get_user() and friends,
+    but simply because get_user() is an excellent source of a pointer
+    that is obviously controlled from a potentially attacking user
+    space. So it's a prime candidate for then finding _subsequent_
+    accesses that can then be used to perturb the cache.
+
+Unlike the __get_user() case get_user() includes the address limit check
+near the pointer de-reference. With that locality the speculation can be
+mitigated with pointer narrowing rather than a barrier, i.e.
+array_index_nospec(). Where the narrowing is performed by:
+
+       cmp %limit, %ptr
+       sbb %mask, %mask
+       and %mask, %ptr
+
+With respect to speculation the value of %ptr is either less than %limit
+or NULL.
+
+Co-developed-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727417469.33451.11804043010080838495.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/getuser.S |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -39,6 +39,8 @@ ENTRY(__get_user_1)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
++      sbb %_ASM_DX, %_ASM_DX          /* array_index_mask_nospec() */
++      and %_ASM_DX, %_ASM_AX
+       ASM_STAC
+ 1:    movzbl (%_ASM_AX),%edx
+       xor %eax,%eax
+@@ -53,6 +55,8 @@ ENTRY(__get_user_2)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
++      sbb %_ASM_DX, %_ASM_DX          /* array_index_mask_nospec() */
++      and %_ASM_DX, %_ASM_AX
+       ASM_STAC
+ 2:    movzwl -1(%_ASM_AX),%edx
+       xor %eax,%eax
+@@ -67,6 +71,8 @@ ENTRY(__get_user_4)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
++      sbb %_ASM_DX, %_ASM_DX          /* array_index_mask_nospec() */
++      and %_ASM_DX, %_ASM_AX
+       ASM_STAC
+ 3:    movl -3(%_ASM_AX),%edx
+       xor %eax,%eax
+@@ -82,6 +88,8 @@ ENTRY(__get_user_8)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
++      sbb %_ASM_DX, %_ASM_DX          /* array_index_mask_nospec() */
++      and %_ASM_DX, %_ASM_AX
+       ASM_STAC
+ 4:    movq -7(%_ASM_AX),%rdx
+       xor %eax,%eax
+@@ -93,6 +101,8 @@ ENTRY(__get_user_8)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user_8
++      sbb %_ASM_DX, %_ASM_DX          /* array_index_mask_nospec() */
++      and %_ASM_DX, %_ASM_AX
+       ASM_STAC
+ 4:    movl -7(%_ASM_AX),%edx
+ 5:    movl -3(%_ASM_AX),%ecx
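
The cmp/sbb/and sequence quoted in the changelog builds a branchless mask from the carry flag. Its effect, written out in C purely for illustration (mask_user_ptr() is an invented name; a real implementation must stay branchless as in the assembly, and the ternary below only documents the value the mask takes):

    /*
     * After "cmp %limit, %ptr; sbb %mask, %mask; and %mask, %ptr":
     *   ptr <  limit : mask = ~0UL, ptr passes through unchanged
     *   ptr >= limit : mask =  0UL, ptr becomes NULL
     * so a speculative dereference sees either a valid user pointer or
     * NULL, never an arbitrary attacker-chosen address.
     */
    static inline unsigned long mask_user_ptr(unsigned long ptr,
                                              unsigned long limit)
    {
            unsigned long mask = (ptr < limit) ? ~0UL : 0UL;

            return ptr & mask;
    }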
diff --git a/queue-4.9/x86-implement-array_index_mask_nospec.patch b/queue-4.9/x86-implement-array_index_mask_nospec.patch
new file mode 100644 (file)
index 0000000..23b2cba
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:28 -0800
+Subject: x86: Implement array_index_mask_nospec
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit babdde2698d482b6c0de1eab4f697cf5856c5859)
+
+array_index_nospec() uses a mask to sanitize user controllable array
+indexes, i.e. generate a 0 mask if 'index' >= 'size', and a ~0 mask
+otherwise. While the default array_index_mask_nospec() handles the
+carry-bit from the (index - size) result in software.
+
+The x86 array_index_mask_nospec() does the same, but the carry-bit is
+handled in the processor CF flag without conditional instructions in the
+control flow.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727414808.33451.1873237130672785331.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/barrier.h |   24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -23,6 +23,30 @@
+ #define wmb() asm volatile("sfence" ::: "memory")
+ #endif
++/**
++ * array_index_mask_nospec() - generate a mask that is ~0UL when the
++ *    bounds check succeeds and 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ *
++ * Returns:
++ *     0 - (index < size)
++ */
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++              unsigned long size)
++{
++      unsigned long mask;
++
++      asm ("cmp %1,%2; sbb %0,%0;"
++                      :"=r" (mask)
++                      :"r"(size),"r" (index)
++                      :"cc");
++      return mask;
++}
++
++/* Override the default implementation from linux/nospec.h. */
++#define array_index_mask_nospec array_index_mask_nospec
++
+ #ifdef CONFIG_X86_PPRO_FENCE
+ #define dma_rmb()     rmb()
+ #else
diff --git a/queue-4.9/x86-introduce-__uaccess_begin_nospec-and-uaccess_try_nospec.patch b/queue-4.9/x86-introduce-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
new file mode 100644 (file)
index 0000000..885922d
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:39 -0800
+Subject: x86: Introduce __uaccess_begin_nospec() and uaccess_try_nospec
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit b3bbfb3fb5d25776b8e3f361d2eedaabb0b496cd)
+
+For __get_user() paths, do not allow the kernel to speculate on the value
+of a user controlled pointer. In addition to the 'stac' instruction for
+Supervisor Mode Access Protection (SMAP), a barrier_nospec() causes the
+access_ok() result to resolve in the pipeline before the CPU might take any
+speculative action on the pointer value. Given the cost of 'stac' the
+speculation barrier is placed after 'stac' to hopefully overlap the cost of
+disabling SMAP with the cost of flushing the instruction pipeline.
+
+Since __get_user is a major kernel interface that deals with user
+controlled pointers, the __uaccess_begin_nospec() mechanism will prevent
+speculative execution past an access_ok() permission check. While
+speculative execution past access_ok() is not enough to lead to a kernel
+memory leak, it is a necessary precondition.
+
+To be clear, __uaccess_begin_nospec() is addressing a class of potential
+problems near __get_user() usages.
+
+Note, that while the barrier_nospec() in __uaccess_begin_nospec() is used
+to protect __get_user(), pointer masking similar to array_index_nospec()
+will be used for get_user() since it incorporates a bounds check near the
+usage.
+
+uaccess_try_nospec provides the same mechanism for get_user_try.
+
+No functional changes.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Suggested-by: Andi Kleen <ak@linux.intel.com>
+Suggested-by: Ingo Molnar <mingo@redhat.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727415922.33451.5796614273104346583.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/uaccess.h |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -123,6 +123,11 @@ extern int __get_user_bad(void);
+ #define __uaccess_begin() stac()
+ #define __uaccess_end()   clac()
++#define __uaccess_begin_nospec()      \
++({                                    \
++      stac();                         \
++      barrier_nospec();               \
++})
+ /*
+  * This is a type: either unsigned long, if the argument fits into
+@@ -474,6 +479,10 @@ struct __large_struct { unsigned long bu
+       __uaccess_begin();                                              \
+       barrier();
++#define uaccess_try_nospec do {                                               \
++      current->thread.uaccess_err = 0;                                \
++      __uaccess_begin_nospec();                                       \
++
+ #define uaccess_catch(err)                                            \
+       __uaccess_end();                                                \
+       (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
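
Later patches in this series (x86-usercopy-... and x86-uaccess-...) switch the __get_user()/__copy_from_user() paths over to these helpers. The resulting shape of a guarded user access is roughly the following (a sketch only; uptr and val are placeholders, and the real code goes through the exception-table protected __get_user_asm machinery rather than a plain dereference):

    if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
            return -EFAULT;
    __uaccess_begin_nospec();       /* stac, then barrier_nospec() */
    val = *uptr;                    /* stand-in for the fixup-protected load */
    __uaccess_end();                /* clac */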
diff --git a/queue-4.9/x86-introduce-barrier_nospec.patch b/queue-4.9/x86-introduce-barrier_nospec.patch
new file mode 100644 (file)
index 0000000..ae6aa42
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:33 -0800
+Subject: x86: Introduce barrier_nospec
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit b3d7ad85b80bbc404635dca80f5b129f6242bc7a)
+
+Rename the open coded form of this instruction sequence from
+rdtsc_ordered() into a generic barrier primitive, barrier_nospec().
+
+One of the mitigations for Spectre variant1 vulnerabilities is to fence
+speculative execution after successfully validating a bounds check. I.e.
+force the result of a bounds check to resolve in the instruction pipeline
+to ensure speculative execution honors that result before potentially
+operating on out-of-bounds data.
+
+No functional changes.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Suggested-by: Andi Kleen <ak@linux.intel.com>
+Suggested-by: Ingo Molnar <mingo@redhat.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727415361.33451.9049453007262764675.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/barrier.h |    4 ++++
+ arch/x86/include/asm/msr.h     |    3 +--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -47,6 +47,10 @@ static inline unsigned long array_index_
+ /* Override the default implementation from linux/nospec.h. */
+ #define array_index_mask_nospec array_index_mask_nospec
++/* Prevent speculative execution past this barrier. */
++#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
++                                         "lfence", X86_FEATURE_LFENCE_RDTSC)
++
+ #ifdef CONFIG_X86_PPRO_FENCE
+ #define dma_rmb()     rmb()
+ #else
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -188,8 +188,7 @@ static __always_inline unsigned long lon
+        * that some other imaginary CPU is updating continuously with a
+        * time stamp.
+        */
+-      alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+-                        "lfence", X86_FEATURE_LFENCE_RDTSC);
++      barrier_nospec();
+       return rdtsc();
+ }
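
barrier_nospec() is the blunt alternative to index masking: placed after a bounds check it keeps speculative execution from running ahead of the check. A minimal usage sketch (not from the patch; nr_entries and table are invented names):

    if (index >= nr_entries)
            return -EINVAL;
    barrier_nospec();       /* lfence/mfence: the check must resolve first */
    val = table[index];     /* the out-of-bounds load can no longer be speculated */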
diff --git a/queue-4.9/x86-kvm-update-spectre-v1-mitigation.patch b/queue-4.9/x86-kvm-update-spectre-v1-mitigation.patch
new file mode 100644 (file)
index 0000000..10533ee
--- /dev/null
@@ -0,0 +1,70 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 31 Jan 2018 17:47:03 -0800
+Subject: x86/kvm: Update spectre-v1 mitigation
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit 085331dfc6bbe3501fb936e657331ca943827600)
+
+Commit 75f139aaf896 "KVM: x86: Add memory barrier on vmcs field lookup"
+added a raw 'asm("lfence");' to prevent a bounds check bypass of
+'vmcs_field_to_offset_table'.
+
+The lfence can be avoided in this path by using the array_index_nospec()
+helper designed for these types of fixes.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Andrew Honig <ahonig@google.com>
+Cc: kvm@vger.kernel.org
+Cc: Jim Mattson <jmattson@google.com>
+Link: https://lkml.kernel.org/r/151744959670.6342.3001723920950249067.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c |   20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <linux/tboot.h>
+ #include <linux/hrtimer.h>
++#include <linux/nospec.h>
+ #include "kvm_cache_regs.h"
+ #include "x86.h"
+@@ -856,21 +857,18 @@ static const unsigned short vmcs_field_t
+ static inline short vmcs_field_to_offset(unsigned long field)
+ {
+-      BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
++      const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
++      unsigned short offset;
+-      if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
++      BUILD_BUG_ON(size > SHRT_MAX);
++      if (field >= size)
+               return -ENOENT;
+-      /*
+-       * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
+-       * generic mechanism.
+-       */
+-      asm("lfence");
+-
+-      if (vmcs_field_to_offset_table[field] == 0)
++      field = array_index_nospec(field, size);
++      offset = vmcs_field_to_offset_table[field];
++      if (offset == 0)
+               return -ENOENT;
+-
+-      return vmcs_field_to_offset_table[field];
++      return offset;
+ }
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
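
The same idea in miniature, hedged: the wrapper and table below are hypothetical, while array_index_nospec() is the real helper from <linux/nospec.h> that this patch switches to.

#include <linux/nospec.h>
#include <linux/types.h>

/* Clamp the attacker-influenced index with a data dependency instead of
 * an LFENCE, then index the table only with the clamped value. */
static u16 example_offset(unsigned long field, const u16 *table, size_t size)
{
        if (field >= size)
                return 0;
        field = array_index_nospec(field, size);
        return table[field];
}
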
diff --git a/queue-4.9/x86-nospec-fix-header-guards-names.patch b/queue-4.9/x86-nospec-fix-header-guards-names.patch
new file mode 100644 (file)
index 0000000..a9bcc71
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 26 Jan 2018 13:11:37 +0100
+Subject: x86/nospec: Fix header guards names
+
+From: Borislav Petkov <bp@suse.de>
+
+(cherry picked from commit 7a32fc51ca938e67974cbb9db31e1a43f98345a9)
+
+... to adhere to the _ASM_X86_ naming scheme.
+
+No functional change.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-3-bp@alien8.de
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __NOSPEC_BRANCH_H__
+-#define __NOSPEC_BRANCH_H__
++#ifndef _ASM_X86_NOSPEC_BRANCH_H_
++#define _ASM_X86_NOSPEC_BRANCH_H_
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+@@ -232,4 +232,4 @@ static inline void indirect_branch_predi
+ }
+ #endif /* __ASSEMBLY__ */
+-#endif /* __NOSPEC_BRANCH_H__ */
++#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/queue-4.9/x86-paravirt-remove-noreplace-paravirt-cmdline-option.patch b/queue-4.9/x86-paravirt-remove-noreplace-paravirt-cmdline-option.patch
new file mode 100644 (file)
index 0000000..a670a21
--- /dev/null
@@ -0,0 +1,91 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 30 Jan 2018 22:13:33 -0600
+Subject: x86/paravirt: Remove 'noreplace-paravirt' cmdline option
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+
+(cherry picked from commit 12c69f1e94c89d40696e83804dd2f0965b5250cd)
+
+The 'noreplace-paravirt' option disables paravirt patching, leaving the
+original pv indirect calls in place.
+
+That's highly incompatible with retpolines, unless we want to uglify
+paravirt even further and convert the paravirt calls to retpolines.
+
+As far as I can tell, the option doesn't seem to be useful for much
+other than introducing surprising corner cases and making the kernel
+vulnerable to Spectre v2.  It was probably a debug option from the early
+paravirt days.  So just remove it.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Jun Nakajima <jun.nakajima@intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Alok Kataria <akataria@vmware.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Link: https://lkml.kernel.org/r/20180131041333.2x6blhxirc2kclrq@treble
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt |    2 --
+ arch/x86/kernel/alternative.c       |   14 --------------
+ 2 files changed, 16 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2805,8 +2805,6 @@ bytes respectively. Such letter suffixes
+       norandmaps      Don't use address space randomization.  Equivalent to
+                       echo 0 > /proc/sys/kernel/randomize_va_space
+-      noreplace-paravirt      [X86,IA-64,PV_OPS] Don't patch paravirt_ops
+-
+       noreplace-smp   [X86-32,SMP] Don't replace SMP instructions
+                       with UP alternatives
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -46,17 +46,6 @@ static int __init setup_noreplace_smp(ch
+ }
+ __setup("noreplace-smp", setup_noreplace_smp);
+-#ifdef CONFIG_PARAVIRT
+-static int __initdata_or_module noreplace_paravirt = 0;
+-
+-static int __init setup_noreplace_paravirt(char *str)
+-{
+-      noreplace_paravirt = 1;
+-      return 1;
+-}
+-__setup("noreplace-paravirt", setup_noreplace_paravirt);
+-#endif
+-
+ #define DPRINTK(fmt, args...)                                         \
+ do {                                                                  \
+       if (debug_alternative)                                          \
+@@ -588,9 +577,6 @@ void __init_or_module apply_paravirt(str
+       struct paravirt_patch_site *p;
+       char insnbuf[MAX_PATCH_LEN];
+-      if (noreplace_paravirt)
+-              return;
+-
+       for (p = start; p < end; p++) {
+               unsigned int used;
diff --git a/queue-4.9/x86-pti-mark-constant-arrays-as-__initconst.patch b/queue-4.9/x86-pti-mark-constant-arrays-as-__initconst.patch
new file mode 100644 (file)
index 0000000..d9843e5
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Thu Feb  8 03:33:09 CET 2018
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 2 Feb 2018 22:39:23 +0100
+Subject: x86/pti: Mark constant arrays as __initconst
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+
+(cherry picked from commit 4bf5d56d429cbc96c23d809a08f63cd29e1a702e)
+
+I'm seeing build failures from the two newly introduced arrays that
+are marked 'const' and '__initdata', which are mutually exclusive:
+
+arch/x86/kernel/cpu/common.c:882:43: error: 'cpu_no_speculation' causes a section type conflict with 'e820_table_firmware_init'
+arch/x86/kernel/cpu/common.c:895:43: error: 'cpu_no_meltdown' causes a section type conflict with 'e820_table_firmware_init'
+
+The correct annotation is __initconst.
+
+Fixes: fec9434a12f3 ("x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Thomas Garnier <thgarnie@google.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180202213959.611210-1-arnd@arndb.de
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -861,7 +861,7 @@ static void identify_cpu_without_cpuid(s
+ #endif
+ }
+-static const __initdata struct x86_cpu_id cpu_no_speculation[] = {
++static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CEDARVIEW,   X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CLOVERVIEW,  X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_LINCROFT,    X86_FEATURE_ANY },
+@@ -874,7 +874,7 @@ static const __initdata struct x86_cpu_i
+       {}
+ };
+-static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
++static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+       { X86_VENDOR_AMD },
+       {}
+ };
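
A short sketch of the rule being applied (names hypothetical): constant init-time data belongs in .init.rodata via __initconst, whereas pairing 'const' with __initdata targets the writable .init.data section and produces exactly the section type conflict quoted above.

#include <linux/init.h>

static const __initconst int example_table[] = { 1, 2, 3 };

static int __init example_init(void)
{
        return example_table[0];        /* only referenced from __init code */
}
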
diff --git a/queue-4.9/x86-retpoline-avoid-retpolines-for-built-in-__init-functions.patch b/queue-4.9/x86-retpoline-avoid-retpolines-for-built-in-__init-functions.patch
new file mode 100644 (file)
index 0000000..d5ac805
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 1 Feb 2018 11:27:20 +0000
+Subject: x86/retpoline: Avoid retpolines for built-in __init functions
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+
+(cherry picked from commit 66f793099a636862a71c59d4a6ba91387b155e0c)
+
+There's no point in building init code with retpolines, since it runs before
+any potentially hostile userspace does. And before the retpoline is actually
+ALTERNATIVEd into place, for much of it.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: karahmed@amazon.de
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Link: https://lkml.kernel.org/r/1517484441-1420-2-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/init.h |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -4,6 +4,13 @@
+ #include <linux/compiler.h>
+ #include <linux/types.h>
++/* Built-in __init functions needn't be compiled with retpoline */
++#if defined(RETPOLINE) && !defined(MODULE)
++#define __noretpoline __attribute__((indirect_branch("keep")))
++#else
++#define __noretpoline
++#endif
++
+ /* These macros are used to mark some functions or 
+  * initialized data (doesn't apply to uninitialized data)
+  * as `initialization' functions. The kernel can take this
+@@ -39,7 +46,7 @@
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+-#define __init                __section(.init.text) __cold notrace __latent_entropy
++#define __init                __section(.init.text) __cold notrace __latent_entropy __noretpoline
+ #define __initdata    __section(.init.data)
+ #define __initconst   __section(.init.rodata)
+ #define __exitdata    __section(.exit.data)
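
A hedged sketch of the kind of code this helps (all names hypothetical): built-in __init code commonly makes indirect calls through ops tables, and with __noretpoline folded into __init those calls can be emitted as plain CALLs instead of retpoline thunks.

#include <linux/init.h>

struct example_ops {
        int (*setup)(void);
};

static int __init example_setup(void)
{
        return 0;
}

static struct example_ops example_ops __initdata = {
        .setup = example_setup,
};

static int __init example_driver_init(void)
{
        /* Indirect call in built-in init code: no retpoline thunk needed,
         * since nothing hostile can run this early. */
        return example_ops.setup();
}
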
diff --git a/queue-4.9/x86-retpoline-simplify-vmexit_fill_rsb.patch b/queue-4.9/x86-retpoline-simplify-vmexit_fill_rsb.patch
new file mode 100644 (file)
index 0000000..f524811
--- /dev/null
@@ -0,0 +1,248 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Borislav Petkov <bp@alien8.de>
+Date: Sat, 27 Jan 2018 16:24:33 +0000
+Subject: x86/retpoline: Simplify vmexit_fill_RSB()
+
+From: Borislav Petkov <bp@alien8.de>
+
+(cherry picked from commit 1dde7415e99933bb7293d6b2843752cbdb43ec11)
+
+Simplify it to call an asm-function instead of pasting 41 insn bytes at
+every call site. Also, add alignment to the macro as suggested here:
+
+  https://support.google.com/faqs/answer/7625886
+
+[dwmw2: Clean up comments, let it clobber %ebx and just tell the compiler]
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1517070274-12128-3-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_32.S             |    3 -
+ arch/x86/entry/entry_64.S             |    3 -
+ arch/x86/include/asm/asm-prototypes.h |    3 +
+ arch/x86/include/asm/nospec-branch.h  |   70 +++-------------------------------
+ arch/x86/lib/Makefile                 |    1 
+ arch/x86/lib/retpoline.S              |   56 +++++++++++++++++++++++++++
+ 6 files changed, 71 insertions(+), 65 deletions(-)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -237,7 +237,8 @@ ENTRY(__switch_to_asm)
+        * exist, overwrite the RSB with entries which capture
+        * speculative execution to prevent attack.
+        */
+-      FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++      /* Clobbers %ebx */
++      FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+       /* restore callee-saved registers */
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -435,7 +435,8 @@ ENTRY(__switch_to_asm)
+        * exist, overwrite the RSB with entries which capture
+        * speculative execution to prevent attack.
+        */
+-      FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++      /* Clobbers %rbx */
++      FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+       /* restore callee-saved registers */
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -37,4 +37,7 @@ INDIRECT_THUNK(dx)
+ INDIRECT_THUNK(si)
+ INDIRECT_THUNK(di)
+ INDIRECT_THUNK(bp)
++asmlinkage void __fill_rsb(void);
++asmlinkage void __clear_rsb(void);
++
+ #endif /* CONFIG_RETPOLINE */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -7,50 +7,6 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
+-/*
+- * Fill the CPU return stack buffer.
+- *
+- * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+- *
+- * This is required in various cases for retpoline and IBRS-based
+- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+- * eliminate potentially bogus entries from the RSB, and sometimes
+- * purely to ensure that it doesn't get empty, which on some CPUs would
+- * allow predictions from other (unwanted!) sources to be used.
+- *
+- * We define a CPP macro such that it can be used from both .S files and
+- * inline assembly. It's possible to do a .macro and then include that
+- * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+- */
+-
+-#define RSB_CLEAR_LOOPS               32      /* To forcibly overwrite all entries */
+-#define RSB_FILL_LOOPS                16      /* To avoid underflow */
+-
+-/*
+- * Google experimented with loop-unrolling and this turned out to be
+- * the optimal version - two calls, each with their own speculation
+- * trap should their return address end up getting used, in a loop.
+- */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp)     \
+-      mov     $(nr/2), reg;                   \
+-771:                                          \
+-      call    772f;                           \
+-773:  /* speculation trap */                  \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     773b;                           \
+-772:                                          \
+-      call    774f;                           \
+-775:  /* speculation trap */                  \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     775b;                           \
+-774:                                          \
+-      dec     reg;                            \
+-      jnz     771b;                           \
+-      add     $(BITS_PER_LONG/8) * nr, sp;
+-
+ #ifdef __ASSEMBLY__
+ /*
+@@ -121,17 +77,10 @@
+ #endif
+ .endm
+- /*
+-  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+-  * monstrosity above, manually.
+-  */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
++/* This clobbers the BX register */
++.macro FILL_RETURN_BUFFER nr:req ftr:req
+ #ifdef CONFIG_RETPOLINE
+-      ANNOTATE_NOSPEC_ALTERNATIVE
+-      ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
+-              __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+-              \ftr
+-.Lskip_rsb_\@:
++      ALTERNATIVE "", "call __clear_rsb", \ftr
+ #endif
+ .endm
+@@ -206,15 +155,10 @@ extern char __indirect_thunk_end[];
+ static inline void vmexit_fill_RSB(void)
+ {
+ #ifdef CONFIG_RETPOLINE
+-      unsigned long loops;
+-
+-      asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+-                    ALTERNATIVE("jmp 910f",
+-                                __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+-                                X86_FEATURE_RETPOLINE)
+-                    "910:"
+-                    : "=r" (loops), ASM_CALL_CONSTRAINT
+-                    : : "memory" );
++      alternative_input("",
++                        "call __fill_rsb",
++                        X86_FEATURE_RETPOLINE,
++                        ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+ #endif
+ }
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -26,6 +26,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) +=
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+ lib-$(CONFIG_RETPOLINE) += retpoline.o
++OBJECT_FILES_NON_STANDARD_retpoline.o :=y
+ obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -7,6 +7,7 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ #include <asm/nospec-branch.h>
++#include <asm/bitsperlong.h>
+ .macro THUNK reg
+       .section .text.__x86.indirect_thunk
+@@ -46,3 +47,58 @@ GENERATE_THUNK(r13)
+ GENERATE_THUNK(r14)
+ GENERATE_THUNK(r15)
+ #endif
++
++/*
++ * Fill the CPU return stack buffer.
++ *
++ * Each entry in the RSB, if used for a speculative 'ret', contains an
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
++ *
++ * This is required in various cases for retpoline and IBRS-based
++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
++ * eliminate potentially bogus entries from the RSB, and sometimes
++ * purely to ensure that it doesn't get empty, which on some CPUs would
++ * allow predictions from other (unwanted!) sources to be used.
++ *
++ * Google experimented with loop-unrolling and this turned out to be
++ * the optimal version - two calls, each with their own speculation
++ * trap should their return address end up getting used, in a loop.
++ */
++.macro STUFF_RSB nr:req sp:req
++      mov     $(\nr / 2), %_ASM_BX
++      .align 16
++771:
++      call    772f
++773:                                          /* speculation trap */
++      pause
++      lfence
++      jmp     773b
++      .align 16
++772:
++      call    774f
++775:                                          /* speculation trap */
++      pause
++      lfence
++      jmp     775b
++      .align 16
++774:
++      dec     %_ASM_BX
++      jnz     771b
++      add     $((BITS_PER_LONG/8) * \nr), \sp
++.endm
++
++#define RSB_FILL_LOOPS                16      /* To avoid underflow */
++
++ENTRY(__fill_rsb)
++      STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
++      ret
++END(__fill_rsb)
++EXPORT_SYMBOL_GPL(__fill_rsb)
++
++#define RSB_CLEAR_LOOPS               32      /* To forcibly overwrite all entries */
++
++ENTRY(__clear_rsb)
++      STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
++      ret
++END(__clear_rsb)
++EXPORT_SYMBOL_GPL(__clear_rsb)
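
For context, a hedged sketch of the C-level call-site shape this simplifies (the surrounding function is hypothetical; vmexit_fill_RSB() is the real helper from nospec-branch.h):

#include <asm/nospec-branch.h>

static void example_after_vmexit(void)
{
        /* With this patch the inline alternative is just "call __fill_rsb"
         * instead of ~41 instruction bytes pasted at every call site. */
        vmexit_fill_RSB();
}
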
diff --git a/queue-4.9/x86-spectre-check-config_retpoline-in-command-line-parser.patch b/queue-4.9/x86-spectre-check-config_retpoline-in-command-line-parser.patch
new file mode 100644 (file)
index 0000000..5136879
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: Dou Liyang <douly.fnst@cn.fujitsu.com>
+Date: Tue, 30 Jan 2018 14:13:50 +0800
+Subject: x86/spectre: Check CONFIG_RETPOLINE in command line parser
+
+From: Dou Liyang <douly.fnst@cn.fujitsu.com>
+
+(cherry picked from commit 9471eee9186a46893726e22ebb54cade3f9bc043)
+
+The spectre_v2 option 'auto' does not check whether CONFIG_RETPOLINE is
+enabled. As a consequence it fails to emit the appropriate warning and sets
+feature flags which have no effect at all.
+
+Add the missing IS_ENABLED() check.
+
+Fixes: da285121560e ("x86/spectre: Add boot time option to select Spectre v2 mitigation")
+Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: Tomohiro" <misono.tomohiro@jp.fujitsu.com>
+Cc: dave.hansen@intel.com
+Cc: bp@alien8.de
+Cc: arjan@linux.intel.com
+Cc: dwmw@amazon.co.uk
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/f5892721-7528-3647-08fb-f8d10e65ad87@cn.fujitsu.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -212,10 +212,10 @@ static void __init spectre_v2_select_mit
+               return;
+       case SPECTRE_V2_CMD_FORCE:
+-              /* FALLTRHU */
+       case SPECTRE_V2_CMD_AUTO:
+-              goto retpoline_auto;
+-
++              if (IS_ENABLED(CONFIG_RETPOLINE))
++                      goto retpoline_auto;
++              break;
+       case SPECTRE_V2_CMD_RETPOLINE_AMD:
+               if (IS_ENABLED(CONFIG_RETPOLINE))
+                       goto retpoline_amd;
diff --git a/queue-4.9/x86-spectre-fix-spelling-mistake-vunerable-vulnerable.patch b/queue-4.9/x86-spectre-fix-spelling-mistake-vunerable-vulnerable.patch
new file mode 100644 (file)
index 0000000..9c55ad7
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 30 Jan 2018 19:32:18 +0000
+Subject: x86/spectre: Fix spelling mistake: "vunerable"-> "vulnerable"
+
+From: Colin Ian King <colin.king@canonical.com>
+
+
+(cherry picked from commit e698dcdfcda41efd0984de539767b4cddd235f1e)
+
+Trivial fix to spelling mistake in pr_err error message text.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: kernel-janitors@vger.kernel.org
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180130193218.9271-1-colin.king@canonical.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -102,7 +102,7 @@ bool retpoline_module_ok(bool has_retpol
+       if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+               return true;
+-      pr_err("System may be vunerable to spectre v2\n");
++      pr_err("System may be vulnerable to spectre v2\n");
+       spectre_v2_bad_module = true;
+       return false;
+ }
diff --git a/queue-4.9/x86-spectre-report-get_user-mitigation-for-spectre_v1.patch b/queue-4.9/x86-spectre-report-get_user-mitigation-for-spectre_v1.patch
new file mode 100644 (file)
index 0000000..1388be9
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:03:21 -0800
+Subject: x86/spectre: Report get_user mitigation for spectre_v1
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit edfbae53dab8348fca778531be9f4855d2ca0360)
+
+Reflect the presence of get_user(), __get_user(), and 'syscall' protections
+in sysfs. The expectation is that new and better tooling will allow the
+kernel to grow more usages of array_index_nospec(), for now, only claim
+mitigation for __user pointer de-references.
+
+Reported-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727420158.33451.11658324346540434635.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -296,7 +296,7 @@ ssize_t cpu_show_spectre_v1(struct devic
+ {
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+               return sprintf(buf, "Not affected\n");
+-      return sprintf(buf, "Vulnerable\n");
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
+ ssize_t cpu_show_spectre_v2(struct device *dev,
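
A small runnable userspace check (a sketch using the standard sysfs path) for the string this patch changes:

#include <stdio.h>

int main(void)
{
        char buf[128] = "";
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                /* Expect "Mitigation: __user pointer sanitization" on an
                 * affected CPU running a kernel with this patch applied. */
                printf("spectre_v1: %s", buf);
        fclose(f);
        return 0;
}
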
diff --git a/queue-4.9/x86-spectre-simplify-spectre_v2-command-line-parsing.patch b/queue-4.9/x86-spectre-simplify-spectre_v2-command-line-parsing.patch
new file mode 100644 (file)
index 0000000..bef7737
--- /dev/null
@@ -0,0 +1,138 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: KarimAllah Ahmed <karahmed@amazon.de>
+Date: Thu, 1 Feb 2018 11:27:21 +0000
+Subject: x86/spectre: Simplify spectre_v2 command line parsing
+
+From: KarimAllah Ahmed <karahmed@amazon.de>
+
+
+(cherry picked from commit 9005c6834c0ffdfe46afa76656bd9276cca864f6)
+
+[dwmw2: Use ARRAY_SIZE]
+
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Link: https://lkml.kernel.org/r/1517484441-1420-3-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |   84 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 55 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -118,13 +118,13 @@ static inline const char *spectre_v2_mod
+ static void __init spec2_print_if_insecure(const char *reason)
+ {
+       if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+-              pr_info("%s\n", reason);
++              pr_info("%s selected on command line.\n", reason);
+ }
+ static void __init spec2_print_if_secure(const char *reason)
+ {
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+-              pr_info("%s\n", reason);
++              pr_info("%s selected on command line.\n", reason);
+ }
+ static inline bool retp_compiler(void)
+@@ -139,42 +139,68 @@ static inline bool match_option(const ch
+       return len == arglen && !strncmp(arg, opt, len);
+ }
++static const struct {
++      const char *option;
++      enum spectre_v2_mitigation_cmd cmd;
++      bool secure;
++} mitigation_options[] = {
++      { "off",               SPECTRE_V2_CMD_NONE,              false },
++      { "on",                SPECTRE_V2_CMD_FORCE,             true },
++      { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
++      { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
++      { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
++      { "auto",              SPECTRE_V2_CMD_AUTO,              false },
++};
++
+ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ {
+       char arg[20];
+-      int ret;
++      int ret, i;
++      enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
++
++      if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
++              return SPECTRE_V2_CMD_NONE;
++      else {
++              ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
++                                        sizeof(arg));
++              if (ret < 0)
++                      return SPECTRE_V2_CMD_AUTO;
+-      ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+-                                sizeof(arg));
+-      if (ret > 0)  {
+-              if (match_option(arg, ret, "off")) {
+-                      goto disable;
+-              } else if (match_option(arg, ret, "on")) {
+-                      spec2_print_if_secure("force enabled on command line.");
+-                      return SPECTRE_V2_CMD_FORCE;
+-              } else if (match_option(arg, ret, "retpoline")) {
+-                      spec2_print_if_insecure("retpoline selected on command line.");
+-                      return SPECTRE_V2_CMD_RETPOLINE;
+-              } else if (match_option(arg, ret, "retpoline,amd")) {
+-                      if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-                              pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+-                              return SPECTRE_V2_CMD_AUTO;
+-                      }
+-                      spec2_print_if_insecure("AMD retpoline selected on command line.");
+-                      return SPECTRE_V2_CMD_RETPOLINE_AMD;
+-              } else if (match_option(arg, ret, "retpoline,generic")) {
+-                      spec2_print_if_insecure("generic retpoline selected on command line.");
+-                      return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+-              } else if (match_option(arg, ret, "auto")) {
++              for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
++                      if (!match_option(arg, ret, mitigation_options[i].option))
++                              continue;
++                      cmd = mitigation_options[i].cmd;
++                      break;
++              }
++
++              if (i >= ARRAY_SIZE(mitigation_options)) {
++                      pr_err("unknown option (%s). Switching to AUTO select\n",
++                             mitigation_options[i].option);
+                       return SPECTRE_V2_CMD_AUTO;
+               }
+       }
+-      if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
++      if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
++           cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
++           cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
++          !IS_ENABLED(CONFIG_RETPOLINE)) {
++              pr_err("%s selected but not compiled in. Switching to AUTO select\n",
++                     mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+-disable:
+-      spec2_print_if_insecure("disabled on command line.");
+-      return SPECTRE_V2_CMD_NONE;
++      }
++
++      if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
++          boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
++              pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
++              return SPECTRE_V2_CMD_AUTO;
++      }
++
++      if (mitigation_options[i].secure)
++              spec2_print_if_secure(mitigation_options[i].option);
++      else
++              spec2_print_if_insecure(mitigation_options[i].option);
++
++      return cmd;
+ }
+ /* Check for Skylake-like CPUs (for RSB handling) */
diff --git a/queue-4.9/x86-speculation-add-basic-ibpb-indirect-branch-prediction-barrier-support.patch b/queue-4.9/x86-speculation-add-basic-ibpb-indirect-branch-prediction-barrier-support.patch
new file mode 100644 (file)
index 0000000..676d99d
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Thu Feb  8 03:30:27 CET 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 25 Jan 2018 16:14:15 +0000
+Subject: x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+(cherry picked from commit 20ffa1caecca4db8f79fe665acdeaa5af815a24d)
+
+Expose indirect_branch_prediction_barrier() for use in subsequent patches.
+
+[ tglx: Add IBPB status to spectre_v2 sysfs file ]
+
+Co-developed-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-8-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h   |    2 ++
+ arch/x86/include/asm/nospec-branch.h |   13 +++++++++++++
+ arch/x86/kernel/cpu/bugs.c           |   10 +++++++++-
+ 3 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -202,6 +202,8 @@
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+ #define X86_FEATURE_KAISER    ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
++#define X86_FEATURE_IBPB              ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/
++
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
+ #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -218,5 +218,18 @@ static inline void vmexit_fill_RSB(void)
+ #endif
+ }
++static inline void indirect_branch_prediction_barrier(void)
++{
++      asm volatile(ALTERNATIVE("",
++                               "movl %[msr], %%ecx\n\t"
++                               "movl %[val], %%eax\n\t"
++                               "movl $0, %%edx\n\t"
++                               "wrmsr",
++                               X86_FEATURE_IBPB)
++                   : : [msr] "i" (MSR_IA32_PRED_CMD),
++                       [val] "i" (PRED_CMD_IBPB)
++                   : "eax", "ecx", "edx", "memory");
++}
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -262,6 +262,13 @@ retpoline_auto:
+               setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+               pr_info("Filling RSB on context switch\n");
+       }
++
++      /* Initialize Indirect Branch Prediction Barrier if supported */
++      if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) ||
++          boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) {
++              setup_force_cpu_cap(X86_FEATURE_IBPB);
++              pr_info("Enabling Indirect Branch Prediction Barrier\n");
++      }
+ }
+ #undef pr_fmt
+@@ -291,7 +298,8 @@ ssize_t cpu_show_spectre_v2(struct devic
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return sprintf(buf, "Not affected\n");
+-      return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++      return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++                     boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "",
+                      spectre_v2_bad_module ? " - vulnerable module loaded" : "");
+ }
+ #endif
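
A hedged sketch of a caller (the function is hypothetical; the helper is the one added above, and it compiles to a no-op unless X86_FEATURE_IBPB was force-set):

#include <asm/nospec-branch.h>

static void example_switch_to_untrusting_context(void)
{
        /* Discard indirect branch predictions trained by the previous
         * context before running code that must not inherit them. */
        indirect_branch_prediction_barrier();
}
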
diff --git a/queue-4.9/x86-speculation-fix-typo-ibrs_att-which-should-be-ibrs_all.patch b/queue-4.9/x86-speculation-fix-typo-ibrs_att-which-should-be-ibrs_all.patch
new file mode 100644 (file)
index 0000000..2446b18
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Thu Feb  8 03:33:09 CET 2018
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 2 Feb 2018 19:12:20 +0000
+Subject: x86/speculation: Fix typo IBRS_ATT, which should be IBRS_ALL
+
+From: Darren Kenny <darren.kenny@oracle.com>
+
+
+(cherry picked from commit af189c95a371b59f493dbe0f50c0a09724868881)
+
+Fixes: 117cc7a908c83 ("x86/retpoline: Fill return stack buffer on vmexit")
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180202191220.blvgkgutojecxr3b@starbug-vm.ie.oracle.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -150,7 +150,7 @@ extern char __indirect_thunk_end[];
+  * On VMEXIT we must ensure that no RSB predictions learned in the guest
+  * can be followed in the host, by overwriting the RSB completely. Both
+  * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+- * CPUs with IBRS_ATT *might* it be avoided.
++ * CPUs with IBRS_ALL *might* it be avoided.
+  */
+ static inline void vmexit_fill_RSB(void)
+ {
diff --git a/queue-4.9/x86-syscall-sanitize-syscall-table-de-references-under-speculation.patch b/queue-4.9/x86-syscall-sanitize-syscall-table-de-references-under-speculation.patch
new file mode 100644 (file)
index 0000000..cce824f
--- /dev/null
@@ -0,0 +1,61 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:59 -0800
+Subject: x86/syscall: Sanitize syscall table de-references under speculation
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit 2fbd7af5af8665d18bcefae3e9700be07e22b681)
+
+The syscall table base is a user controlled function pointer in kernel
+space. Use array_index_nospec() to prevent any out of bounds speculation.
+
+While retpoline prevents speculating into a userspace directed target it
+does not stop the pointer de-reference, the concern is leaking memory
+relative to the syscall table base, by observing instruction cache
+behavior.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727417984.33451.1216731042505722161.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/common.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -20,6 +20,7 @@
+ #include <linux/export.h>
+ #include <linux/context_tracking.h>
+ #include <linux/user-return-notifier.h>
++#include <linux/nospec.h>
+ #include <linux/uprobes.h>
+ #include <asm/desc.h>
+@@ -277,7 +278,8 @@ __visible void do_syscall_64(struct pt_r
+        * regs->orig_ax, which changes the behavior of some syscalls.
+        */
+       if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
+-              regs->ax = sys_call_table[nr & __SYSCALL_MASK](
++              nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
++              regs->ax = sys_call_table[nr](
+                       regs->di, regs->si, regs->dx,
+                       regs->r10, regs->r8, regs->r9);
+       }
+@@ -313,6 +315,7 @@ static __always_inline void do_syscall_3
+       }
+       if (likely(nr < IA32_NR_syscalls)) {
++              nr = array_index_nospec(nr, IA32_NR_syscalls);
+               /*
+                * It's possible that a 32-bit syscall implementation
+                * takes a 64-bit parameter but nonetheless assumes that
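
The clamp above boils down to branch-free mask arithmetic. Below is a standalone userspace sketch of that idea, based on the generic fallback; the x86 array_index_mask_nospec() uses CMP/SBB assembly instead, and the syscall count here is made up.

#include <stdio.h>

/* All-ones when index < size, zero otherwise, with no conditional branch
 * the CPU could mispredict. */
static unsigned long nospec_mask(unsigned long index, unsigned long size)
{
        return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
        const unsigned long nr_syscalls = 547;  /* arbitrary table size */

        printf("nr 3   -> slot %lu\n", 3UL & nospec_mask(3, nr_syscalls));
        printf("nr 600 -> slot %lu\n", 600UL & nospec_mask(600, nr_syscalls));
        return 0;
}
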
diff --git a/queue-4.9/x86-uaccess-use-__uaccess_begin_nospec-and-uaccess_try_nospec.patch b/queue-4.9/x86-uaccess-use-__uaccess_begin_nospec-and-uaccess_try_nospec.patch
new file mode 100644 (file)
index 0000000..4a3fa28
--- /dev/null
@@ -0,0 +1,187 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:49 -0800
+Subject: x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit 304ec1b050310548db33063e567123fae8fd0301)
+
+Quoting Linus:
+
+    I do think that it would be a good idea to very expressly document
+    the fact that it's not that the user access itself is unsafe. I do
+    agree that things like "get_user()" want to be protected, but not
+    because of any direct bugs or problems with get_user() and friends,
+    but simply because get_user() is an excellent source of a pointer
+    that is obviously controlled from a potentially attacking user
+    space. So it's a prime candidate for then finding _subsequent_
+    accesses that can then be used to perturb the cache.
+
+__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the
+limit check is far away from the user pointer de-reference. In those cases
+a barrier_nospec() prevents speculation with a potential pointer to
+privileged memory. uaccess_try_nospec covers get_user_try.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Suggested-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/uaccess.h    |    6 +++---
+ arch/x86/include/asm/uaccess_32.h |   12 ++++++------
+ arch/x86/include/asm/uaccess_64.h |   12 ++++++------
+ arch/x86/lib/usercopy_32.c        |    4 ++--
+ 4 files changed, 17 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -437,7 +437,7 @@ do {                                                                       \
+ ({                                                                    \
+       int __gu_err;                                                   \
+       __inttype(*(ptr)) __gu_val;                                     \
+-      __uaccess_begin();                                              \
++      __uaccess_begin_nospec();                                       \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       __uaccess_end();                                                \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+@@ -547,7 +547,7 @@ struct __large_struct { unsigned long bu
+  *    get_user_ex(...);
+  * } get_user_catch(err)
+  */
+-#define get_user_try          uaccess_try
++#define get_user_try          uaccess_try_nospec
+ #define get_user_catch(err)   uaccess_catch(err)
+ #define get_user_ex(x, ptr)   do {                                    \
+@@ -582,7 +582,7 @@ extern void __cmpxchg_wrong_size(void)
+       __typeof__(ptr) __uval = (uval);                                \
+       __typeof__(*(ptr)) __old = (old);                               \
+       __typeof__(*(ptr)) __new = (new);                               \
+-      __uaccess_begin();                                              \
++      __uaccess_begin_nospec();                                       \
+       switch (size) {                                                 \
+       case 1:                                                         \
+       {                                                               \
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -102,17 +102,17 @@ __copy_from_user(void *to, const void __
+               switch (n) {
+               case 1:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
+                       return ret;
+               case 2:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
+                       return ret;
+               case 4:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
+                       return ret;
+@@ -130,17 +130,17 @@ static __always_inline unsigned long __c
+               switch (n) {
+               case 1:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
+                       return ret;
+               case 2:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
+                       return ret;
+               case 4:
+-                      __uaccess_begin();
++                      __uaccess_begin_nospec();
+                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
+                       return ret;
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -59,31 +59,31 @@ int __copy_from_user_nocheck(void *dst,
+               return copy_user_generic(dst, (__force void *)src, size);
+       switch (size) {
+       case 1:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u8 *)dst, (u8 __user *)src,
+                             ret, "b", "b", "=q", 1);
+               __uaccess_end();
+               return ret;
+       case 2:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u16 *)dst, (u16 __user *)src,
+                             ret, "w", "w", "=r", 2);
+               __uaccess_end();
+               return ret;
+       case 4:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u32 *)dst, (u32 __user *)src,
+                             ret, "l", "k", "=r", 4);
+               __uaccess_end();
+               return ret;
+       case 8:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                             ret, "q", "", "=r", 8);
+               __uaccess_end();
+               return ret;
+       case 10:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                              ret, "q", "", "=r", 10);
+               if (likely(!ret))
+@@ -93,7 +93,7 @@ int __copy_from_user_nocheck(void *dst,
+               __uaccess_end();
+               return ret;
+       case 16:
+-              __uaccess_begin();
++              __uaccess_begin_nospec();
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                              ret, "q", "", "=r", 16);
+               if (likely(!ret))
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -570,7 +570,7 @@ do {                                                                       \
+ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+                               unsigned long n)
+ {
+-      __uaccess_begin();
++      __uaccess_begin_nospec();
+       if (movsl_is_ok(to, from, n))
+               __copy_user(to, from, n);
+       else
+@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocach
+ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+                                       unsigned long n)
+ {
+-      __uaccess_begin();
++      __uaccess_begin_nospec();
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+       if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+               n = __copy_user_intel_nocache(to, from, n);
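
A hedged sketch of the pattern these hunks apply, on a hypothetical fixed-size fetch; the begin/end helpers are the real ones introduced earlier in this series:

#include <linux/types.h>
#include <linux/uaccess.h>

static int example_get_u32(u32 *dst, const u32 __user *src)
{
        int ret;

        __uaccess_begin_nospec();       /* stac() + barrier_nospec() */
        __get_user_size(*dst, src, 4, ret, -EFAULT);
        __uaccess_end();                /* clac() */
        return ret;
}
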
diff --git a/queue-4.9/x86-usercopy-replace-open-coded-stac-clac-with-__uaccess_-begin-end.patch b/queue-4.9/x86-usercopy-replace-open-coded-stac-clac-with-__uaccess_-begin-end.patch
new file mode 100644 (file)
index 0000000..bf11e7c
--- /dev/null
@@ -0,0 +1,70 @@
+From foo@baz Thu Feb  8 03:32:24 CET 2018
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:44 -0800
+Subject: x86/usercopy: Replace open coded stac/clac with __uaccess_{begin, end}
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+
+(cherry picked from commit b5c4ae4f35325d520b230bab6eb3310613b72ac1)
+
+In preparation for converting some __uaccess_begin() instances to
+__uacess_begin_nospec(), make sure all 'from user' uaccess paths are
+using the _begin(), _end() helpers rather than open-coded stac() and
+clac().
+
+No functional changes.
+
+Suggested-by: Ingo Molnar <mingo@redhat.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727416438.33451.17309465232057176966.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/usercopy_32.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -570,12 +570,12 @@ do {                                                                     \
+ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+                               unsigned long n)
+ {
+-      stac();
++      __uaccess_begin();
+       if (movsl_is_ok(to, from, n))
+               __copy_user(to, from, n);
+       else
+               n = __copy_user_intel(to, from, n);
+-      clac();
++      __uaccess_end();
+       return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocach
+ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+                                       unsigned long n)
+ {
+-      stac();
++      __uaccess_begin();
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+       if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+               n = __copy_user_intel_nocache(to, from, n);
+@@ -636,7 +636,7 @@ unsigned long __copy_from_user_ll_nocach
+ #else
+       __copy_user(to, from, n);
+ #endif
+-      clac();
++      __uaccess_end();
+       return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
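
Finally, a minimal sketch of the conversion being made here (the wrapper is hypothetical; __copy_user() is the file-local raw copy macro seen in the hunks above):

#include <linux/uaccess.h>

static unsigned long example_copy_from_user_ll(void *to,
                                               const void __user *from,
                                               unsigned long n)
{
        __uaccess_begin();              /* was: stac() */
        __copy_user(to, from, n);       /* raw copy loop, as in the hunks above */
        __uaccess_end();                /* was: clac() */
        return n;
}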