git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Feb 2018 10:27:26 +0000 (11:27 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Feb 2018 10:27:26 +0000 (11:27 +0100)
added patches:
nospec-move-array_index_nospec-parameter-checking-into-separate-macro.patch
selftests-x86-do-not-rely-on-int-0x80-in-single_step_syscall.c.patch
selftests-x86-do-not-rely-on-int-0x80-in-test_mremap_vdso.c.patch
selftests-x86-mpx-fix-incorrect-bounds-with-old-_sigfault.patch
selftests-x86-pkeys-remove-unused-functions.patch
x86-cpu-change-type-of-x86_cache_size-variable-to-unsigned-int.patch
x86-cpu-rename-cpu_data.x86_mask-to-cpu_data.x86_stepping.patch
x86-spectre-fix-an-error-message.patch
x86-speculation-add-asm-msr-index.h-dependency.patch
x86-speculation-fix-up-array_index_nospec_mask-asm-constraint.patch

queue-4.9/nospec-move-array_index_nospec-parameter-checking-into-separate-macro.patch [new file with mode: 0644]
queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-single_step_syscall.c.patch [new file with mode: 0644]
queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-test_mremap_vdso.c.patch [new file with mode: 0644]
queue-4.9/selftests-x86-mpx-fix-incorrect-bounds-with-old-_sigfault.patch [new file with mode: 0644]
queue-4.9/selftests-x86-pkeys-remove-unused-functions.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-cpu-change-type-of-x86_cache_size-variable-to-unsigned-int.patch [new file with mode: 0644]
queue-4.9/x86-cpu-rename-cpu_data.x86_mask-to-cpu_data.x86_stepping.patch [new file with mode: 0644]
queue-4.9/x86-spectre-fix-an-error-message.patch [new file with mode: 0644]
queue-4.9/x86-speculation-add-asm-msr-index.h-dependency.patch [new file with mode: 0644]
queue-4.9/x86-speculation-fix-up-array_index_nospec_mask-asm-constraint.patch [new file with mode: 0644]

diff --git a/queue-4.9/nospec-move-array_index_nospec-parameter-checking-into-separate-macro.patch b/queue-4.9/nospec-move-array_index_nospec-parameter-checking-into-separate-macro.patch
new file mode 100644 (file)
index 0000000..9977030
--- /dev/null
@@ -0,0 +1,89 @@
+From 8fa80c503b484ddc1abbd10c7cb2ab81f3824a50 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 14:16:06 +0000
+Subject: nospec: Move array_index_nospec() parameter checking into separate macro
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 8fa80c503b484ddc1abbd10c7cb2ab81f3824a50 upstream.
+
+For architectures providing their own implementation of
+array_index_mask_nospec() in asm/barrier.h, attempting to use WARN_ONCE() to
+complain about out-of-range parameters using WARN_ON() results in a mess
+of mutually-dependent include files.
+
+Rather than unpick the dependencies, simply have the core code in nospec.h
+perform the checking for us.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1517840166-15399-1-git-send-email-will.deacon@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/nospec.h |   36 +++++++++++++++++++++---------------
+ 1 file changed, 21 insertions(+), 15 deletions(-)
+
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -20,20 +20,6 @@ static inline unsigned long array_index_
+                                                   unsigned long size)
+ {
+       /*
+-       * Warn developers about inappropriate array_index_nospec() usage.
+-       *
+-       * Even if the CPU speculates past the WARN_ONCE branch, the
+-       * sign bit of @index is taken into account when generating the
+-       * mask.
+-       *
+-       * This warning is compiled out when the compiler can infer that
+-       * @index and @size are less than LONG_MAX.
+-       */
+-      if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
+-                      "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
+-              return 0;
+-
+-      /*
+        * Always calculate and emit the mask even if the compiler
+        * thinks the mask is not needed. The compiler does not take
+        * into account the value of @index under speculation.
+@@ -44,6 +30,26 @@ static inline unsigned long array_index_
+ #endif
+ /*
++ * Warn developers about inappropriate array_index_nospec() usage.
++ *
++ * Even if the CPU speculates past the WARN_ONCE branch, the
++ * sign bit of @index is taken into account when generating the
++ * mask.
++ *
++ * This warning is compiled out when the compiler can infer that
++ * @index and @size are less than LONG_MAX.
++ */
++#define array_index_mask_nospec_check(index, size)                            \
++({                                                                            \
++      if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,                      \
++          "array_index_nospec() limited to range of [0, LONG_MAX]\n"))        \
++              _mask = 0;                                                      \
++      else                                                                    \
++              _mask = array_index_mask_nospec(index, size);                   \
++      _mask;                                                                  \
++})
++
++/*
+  * array_index_nospec - sanitize an array index after a bounds check
+  *
+  * For a code sequence like:
+@@ -61,7 +67,7 @@ static inline unsigned long array_index_
+ ({                                                                    \
+       typeof(index) _i = (index);                                     \
+       typeof(size) _s = (size);                                       \
+-      unsigned long _mask = array_index_mask_nospec(_i, _s);          \
++      unsigned long _mask = array_index_mask_nospec_check(_i, _s);    \
+                                                                       \
+       BUILD_BUG_ON(sizeof(_i) > sizeof(long));                        \
+       BUILD_BUG_ON(sizeof(_s) > sizeof(long));                        \
diff --git a/queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-single_step_syscall.c.patch b/queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-single_step_syscall.c.patch
new file mode 100644 (file)
index 0000000..627e274
--- /dev/null
@@ -0,0 +1,75 @@
+From 4105c69703cdeba76f384b901712c9397b04e9c2 Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Tue, 13 Feb 2018 09:13:21 +0100
+Subject: selftests/x86: Do not rely on "int $0x80" in single_step_syscall.c
+
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+
+commit 4105c69703cdeba76f384b901712c9397b04e9c2 upstream.
+
+On 64-bit builds, we should not rely on "int $0x80" working (it only does if
+CONFIG_IA32_EMULATION=y is enabled). To keep the "Set TF and check int80"
+test running on 64-bit installs with CONFIG_IA32_EMULATION=y enabled, build
+this test only if we can also build 32-bit binaries (which should be a
+good approximation for that).
+
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dmitry Safonov <dsafonov@virtuozzo.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kselftest@vger.kernel.org
+Cc: shuah@kernel.org
+Link: http://lkml.kernel.org/r/20180211111013.16888-5-linux@dominikbrodowski.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/Makefile              |    2 ++
+ tools/testing/selftests/x86/single_step_syscall.c |    5 ++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -26,11 +26,13 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.s
+ ifeq ($(CAN_BUILD_I386),1)
+ all: all_32
+ TEST_PROGS += $(BINARIES_32)
++EXTRA_CFLAGS += -DCAN_BUILD_32
+ endif
+ ifeq ($(CAN_BUILD_X86_64),1)
+ all: all_64
+ TEST_PROGS += $(BINARIES_64)
++EXTRA_CFLAGS += -DCAN_BUILD_64
+ endif
+ all_32: $(BINARIES_32)
+--- a/tools/testing/selftests/x86/single_step_syscall.c
++++ b/tools/testing/selftests/x86/single_step_syscall.c
+@@ -119,7 +119,9 @@ static void check_result(void)
+ int main()
+ {
++#ifdef CAN_BUILD_32
+       int tmp;
++#endif
+       sethandler(SIGTRAP, sigtrap, 0);
+@@ -139,12 +141,13 @@ int main()
+                     : : "c" (post_nop) : "r11");
+       check_result();
+ #endif
+-
++#ifdef CAN_BUILD_32
+       printf("[RUN]\tSet TF and check int80\n");
+       set_eflags(get_eflags() | X86_EFLAGS_TF);
+       asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+                       : INT80_CLOBBERS);
+       check_result();
++#endif
+       /*
+        * This test is particularly interesting if fast syscalls use
diff --git a/queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-test_mremap_vdso.c.patch b/queue-4.9/selftests-x86-do-not-rely-on-int-0x80-in-test_mremap_vdso.c.patch
new file mode 100644 (file)
index 0000000..32a30d5
--- /dev/null
@@ -0,0 +1,46 @@
+From 2cbc0d66de0480449c75636f55697c7ff3af61fc Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Sun, 11 Feb 2018 12:10:11 +0100
+Subject: selftests/x86: Do not rely on "int $0x80" in test_mremap_vdso.c
+
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+
+commit 2cbc0d66de0480449c75636f55697c7ff3af61fc upstream.
+
+On 64-bit builds, we should not rely on "int $0x80" working (it only does if
+CONFIG_IA32_EMULATION=y is enabled).
+
+Without this patch, the move test may succeed, but the "int $0x80" causes
+a segfault, resulting in a false negative output of this self-test.
+
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dmitry Safonov <dsafonov@virtuozzo.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kselftest@vger.kernel.org
+Cc: shuah@kernel.org
+Link: http://lkml.kernel.org/r/20180211111013.16888-4-linux@dominikbrodowski.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/test_mremap_vdso.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/tools/testing/selftests/x86/test_mremap_vdso.c
++++ b/tools/testing/selftests/x86/test_mremap_vdso.c
+@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **e
+                       vdso_size += PAGE_SIZE;
+               }
++#ifdef __i386__
+               /* Glibc is likely to explode now - exit with raw syscall */
+               asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
++#else /* __x86_64__ */
++              syscall(SYS_exit, ret);
++#endif
+       } else {
+               int status;
diff --git a/queue-4.9/selftests-x86-mpx-fix-incorrect-bounds-with-old-_sigfault.patch b/queue-4.9/selftests-x86-mpx-fix-incorrect-bounds-with-old-_sigfault.patch
new file mode 100644 (file)
index 0000000..cfa8a98
--- /dev/null
@@ -0,0 +1,91 @@
+From 961888b1d76d84efc66a8f5604b06ac12ac2f978 Mon Sep 17 00:00:00 2001
+From: Rui Wang <rui.y.wang@intel.com>
+Date: Mon, 18 Dec 2017 16:34:10 +0800
+Subject: selftests/x86/mpx: Fix incorrect bounds with old _sigfault
+
+From: Rui Wang <rui.y.wang@intel.com>
+
+commit 961888b1d76d84efc66a8f5604b06ac12ac2f978 upstream.
+
+For distributions with old userspace header files, the _sigfault
+structure is different. mpx-mini-test fails with the following
+error:
+
+  [root@Purley]# mpx-mini-test_64 tabletest
+  XSAVE is supported by HW & OS
+  XSAVE processor supported state mask: 0x2ff
+  XSAVE OS supported state mask: 0x2ff
+   BNDREGS: size: 64 user: 1 supervisor: 0 aligned: 0
+    BNDCSR: size: 64 user: 1 supervisor: 0 aligned: 0
+  starting mpx bounds table test
+  ERROR: siginfo bounds do not match shadow bounds for register 0
+
+Fix it by using the correct offset of _lower/_upper in _sigfault.
+RHEL needs this patch to work.
+
+Signed-off-by: Rui Wang <rui.y.wang@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dave.hansen@linux.intel.com
+Fixes: e754aedc26ef ("x86/mpx, selftests: Add MPX self test")
+Link: http://lkml.kernel.org/r/1513586050-1641-1-git-send-email-rui.y.wang@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/mpx-mini-test.c |   32 ++++++++++++++++++++++++++--
+ 1 file changed, 30 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/x86/mpx-mini-test.c
++++ b/tools/testing/selftests/x86/mpx-mini-test.c
+@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(si
+       return si->si_upper;
+ }
+ #else
++
++/*
++ * This deals with old version of _sigfault in some distros:
++ *
++
++old _sigfault:
++        struct {
++            void *si_addr;
++      } _sigfault;
++
++new _sigfault:
++      struct {
++              void __user *_addr;
++              int _trapno;
++              short _addr_lsb;
++              union {
++                      struct {
++                              void __user *_lower;
++                              void __user *_upper;
++                      } _addr_bnd;
++                      __u32 _pkey;
++              };
++      } _sigfault;
++ *
++ */
++
+ static inline void **__si_bounds_hack(siginfo_t *si)
+ {
+       void *sigfault = &si->_sifields._sigfault;
+       void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
+-      void **__si_lower = end_sigfault;
++      int *trapno = (int*)end_sigfault;
++      /* skip _trapno and _addr_lsb */
++      void **__si_lower = (void**)(trapno + 2);
+       return __si_lower;
+ }
+@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(si
+ static inline void *__si_bounds_upper(siginfo_t *si)
+ {
+-      return (*__si_bounds_hack(si)) + sizeof(void *);
++      return *(__si_bounds_hack(si) + 1);
+ }
+ #endif
diff --git a/queue-4.9/selftests-x86-pkeys-remove-unused-functions.patch b/queue-4.9/selftests-x86-pkeys-remove-unused-functions.patch
new file mode 100644 (file)
index 0000000..1b45d21
--- /dev/null
@@ -0,0 +1,70 @@
+From ce676638fe7b284132a7d7d5e7e7ad81bab9947e Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Tue, 13 Feb 2018 08:26:17 +0100
+Subject: selftests/x86/pkeys: Remove unused functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit ce676638fe7b284132a7d7d5e7e7ad81bab9947e upstream.
+
+This also gets rid of two build warnings:
+
+  protection_keys.c: In function ‘dumpit’:
+  protection_keys.c:419:3: warning: ignoring return value of ‘write’, declared with attribute warn_unused_result [-Wunused-result]
+     write(1, buf, nr_read);
+     ^~~~~~~~~~~~~~~~~~~~~~
+
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Shuah Khan <shuahkh@osg.samsung.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/protection_keys.c |   28 --------------------------
+ 1 file changed, 28 deletions(-)
+
+--- a/tools/testing/selftests/x86/protection_keys.c
++++ b/tools/testing/selftests/x86/protection_keys.c
+@@ -381,34 +381,6 @@ pid_t fork_lazy_child(void)
+       return forkret;
+ }
+-void davecmp(void *_a, void *_b, int len)
+-{
+-      int i;
+-      unsigned long *a = _a;
+-      unsigned long *b = _b;
+-
+-      for (i = 0; i < len / sizeof(*a); i++) {
+-              if (a[i] == b[i])
+-                      continue;
+-
+-              dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
+-      }
+-}
+-
+-void dumpit(char *f)
+-{
+-      int fd = open(f, O_RDONLY);
+-      char buf[100];
+-      int nr_read;
+-
+-      dprintf2("maps fd: %d\n", fd);
+-      do {
+-              nr_read = read(fd, &buf[0], sizeof(buf));
+-              write(1, buf, nr_read);
+-      } while (nr_read > 0);
+-      close(fd);
+-}
+-
+ #define PKEY_DISABLE_ACCESS    0x1
+ #define PKEY_DISABLE_WRITE     0x2
index c179b2634f36c51285a3cb831723ba0f113682aa..1b6cf8f72c474586fb2a55a15506e05911c4baad 100644 (file)
@@ -22,3 +22,13 @@ x86-speculation-correct-speculation-control-microcode-blacklist-again.patch
 kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch
 x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch
 x86-speculation-clean-up-various-spectre-related-details.patch
+selftests-x86-pkeys-remove-unused-functions.patch
+selftests-x86-do-not-rely-on-int-0x80-in-test_mremap_vdso.c.patch
+selftests-x86-do-not-rely-on-int-0x80-in-single_step_syscall.c.patch
+x86-speculation-fix-up-array_index_nospec_mask-asm-constraint.patch
+nospec-move-array_index_nospec-parameter-checking-into-separate-macro.patch
+x86-speculation-add-asm-msr-index.h-dependency.patch
+selftests-x86-mpx-fix-incorrect-bounds-with-old-_sigfault.patch
+x86-cpu-rename-cpu_data.x86_mask-to-cpu_data.x86_stepping.patch
+x86-spectre-fix-an-error-message.patch
+x86-cpu-change-type-of-x86_cache_size-variable-to-unsigned-int.patch
diff --git a/queue-4.9/x86-cpu-change-type-of-x86_cache_size-variable-to-unsigned-int.patch b/queue-4.9/x86-cpu-change-type-of-x86_cache_size-variable-to-unsigned-int.patch
new file mode 100644 (file)
index 0000000..a6b8487
--- /dev/null
@@ -0,0 +1,77 @@
+From 24dbc6000f4b9b0ef5a9daecb161f1907733765a Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+Date: Tue, 13 Feb 2018 13:22:08 -0600
+Subject: x86/cpu: Change type of x86_cache_size variable to unsigned int
+
+From: Gustavo A. R. Silva <garsilva@embeddedor.com>
+
+commit 24dbc6000f4b9b0ef5a9daecb161f1907733765a upstream.
+
+Currently, x86_cache_size is of type int, which makes no sense as we
+will never have a valid cache size equal or less than 0. So instead of
+initializing this variable to -1, it can perfectly be initialized to 0
+and use it as an unsigned variable instead.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Addresses-Coverity-ID: 1464429
+Link: http://lkml.kernel.org/r/20180213192208.GA26414@embeddedor.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h      |    2 +-
+ arch/x86/kernel/cpu/common.c          |    2 +-
+ arch/x86/kernel/cpu/microcode/intel.c |    2 +-
+ arch/x86/kernel/cpu/proc.c            |    4 ++--
+ 4 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -113,7 +113,7 @@ struct cpuinfo_x86 {
+       char                    x86_vendor_id[16];
+       char                    x86_model_id[64];
+       /* in KB - valid for CPUS which support this call: */
+-      int                     x86_cache_size;
++      unsigned int            x86_cache_size;
+       int                     x86_cache_alignment;    /* In bytes */
+       /* Cache QoS architectural values: */
+       int                     x86_cache_max_rmid;     /* max index */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1144,7 +1144,7 @@ static void identify_cpu(struct cpuinfo_
+       int i;
+       c->loops_per_jiffy = loops_per_jiffy;
+-      c->x86_cache_size = -1;
++      c->x86_cache_size = 0;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -1132,7 +1132,7 @@ static struct microcode_ops microcode_in
+ static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+ {
+-      u64 llc_size = c->x86_cache_size * 1024;
++      u64 llc_size = c->x86_cache_size * 1024ULL;
+       do_div(llc_size, c->x86_max_cores);
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file
+       }
+       /* Cache size */
+-      if (c->x86_cache_size >= 0)
+-              seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++      if (c->x86_cache_size)
++              seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
+       show_cpuinfo_core(m, c, cpu);
+       show_cpuinfo_misc(m, c);
diff --git a/queue-4.9/x86-cpu-rename-cpu_data.x86_mask-to-cpu_data.x86_stepping.patch b/queue-4.9/x86-cpu-rename-cpu_data.x86_mask-to-cpu_data.x86_stepping.patch
new file mode 100644 (file)
index 0000000..20e7eaf
--- /dev/null
@@ -0,0 +1,700 @@
+From b399151cb48db30ad1e0e93dd40d68c6d007b637 Mon Sep 17 00:00:00 2001
+From: Jia Zhang <qianyue.zj@alibaba-inc.com>
+Date: Mon, 1 Jan 2018 09:52:10 +0800
+Subject: x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping
+
+From: Jia Zhang <qianyue.zj@alibaba-inc.com>
+
+commit b399151cb48db30ad1e0e93dd40d68c6d007b637 upstream.
+
+x86_mask is a confusing name which is hard to associate with the
+processor's stepping.
+
+Additionally, correct an indent issue in lib/cpu.c.
+
+Signed-off-by: Jia Zhang <qianyue.zj@alibaba-inc.com>
+[ Updated it to more recent kernels. ]
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bp@alien8.de
+Cc: tony.luck@intel.com
+Link: http://lkml.kernel.org/r/1514771530-70829-1-git-send-email-qianyue.zj@alibaba-inc.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c          |    2 +-
+ arch/x86/events/intel/lbr.c           |    2 +-
+ arch/x86/events/intel/p6.c            |    2 +-
+ arch/x86/include/asm/acpi.h           |    2 +-
+ arch/x86/include/asm/processor.h      |    2 +-
+ arch/x86/kernel/amd_nb.c              |    2 +-
+ arch/x86/kernel/asm-offsets_32.c      |    2 +-
+ arch/x86/kernel/cpu/amd.c             |   26 +++++++++++++-------------
+ arch/x86/kernel/cpu/centaur.c         |    4 ++--
+ arch/x86/kernel/cpu/common.c          |    8 ++++----
+ arch/x86/kernel/cpu/cyrix.c           |    2 +-
+ arch/x86/kernel/cpu/intel.c           |   18 +++++++++---------
+ arch/x86/kernel/cpu/microcode/intel.c |    4 ++--
+ arch/x86/kernel/cpu/mtrr/generic.c    |    2 +-
+ arch/x86/kernel/cpu/mtrr/main.c       |    4 ++--
+ arch/x86/kernel/cpu/proc.c            |    4 ++--
+ arch/x86/kernel/head_32.S             |    4 ++--
+ arch/x86/kernel/mpparse.c             |    2 +-
+ arch/x86/lib/cpu.c                    |    2 +-
+ drivers/char/hw_random/via-rng.c      |    2 +-
+ drivers/cpufreq/acpi-cpufreq.c        |    2 +-
+ drivers/cpufreq/longhaul.c            |    6 +++---
+ drivers/cpufreq/p4-clockmod.c         |    2 +-
+ drivers/cpufreq/powernow-k7.c         |    2 +-
+ drivers/cpufreq/speedstep-centrino.c  |    4 ++--
+ drivers/cpufreq/speedstep-lib.c       |    6 +++---
+ drivers/crypto/padlock-aes.c          |    2 +-
+ drivers/edac/amd64_edac.c             |    2 +-
+ drivers/edac/mce_amd.c                |    2 +-
+ drivers/hwmon/coretemp.c              |    6 +++---
+ drivers/hwmon/hwmon-vid.c             |    2 +-
+ drivers/hwmon/k10temp.c               |    2 +-
+ drivers/hwmon/k8temp.c                |    2 +-
+ drivers/video/fbdev/geode/video_gx.c  |    2 +-
+ 34 files changed, 69 insertions(+), 69 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3363,7 +3363,7 @@ static int intel_snb_pebs_broken(int cpu
+               break;
+       case INTEL_FAM6_SANDYBRIDGE_X:
+-              switch (cpu_data(cpu).x86_mask) {
++              switch (cpu_data(cpu).x86_stepping) {
+               case 6: rev = 0x618; break;
+               case 7: rev = 0x70c; break;
+               }
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1131,7 +1131,7 @@ void __init intel_pmu_lbr_init_atom(void
+        * on PMU interrupt
+        */
+       if (boot_cpu_data.x86_model == 28
+-          && boot_cpu_data.x86_mask < 10) {
++          && boot_cpu_data.x86_stepping < 10) {
+               pr_cont("LBR disabled due to erratum");
+               return;
+       }
+--- a/arch/x86/events/intel/p6.c
++++ b/arch/x86/events/intel/p6.c
+@@ -233,7 +233,7 @@ static __initconst const struct x86_pmu
+ static __init void p6_pmu_rdpmc_quirk(void)
+ {
+-      if (boot_cpu_data.x86_mask < 9) {
++      if (boot_cpu_data.x86_stepping < 9) {
+               /*
+                * PPro erratum 26; fixed in stepping 9 and above.
+                */
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -92,7 +92,7 @@ static inline unsigned int acpi_processo
+       if (boot_cpu_data.x86 == 0x0F &&
+           boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+           boot_cpu_data.x86_model <= 0x05 &&
+-          boot_cpu_data.x86_mask < 0x0A)
++          boot_cpu_data.x86_stepping < 0x0A)
+               return 1;
+       else if (amd_e400_c1e_detected)
+               return 1;
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -88,7 +88,7 @@ struct cpuinfo_x86 {
+       __u8                    x86;            /* CPU family */
+       __u8                    x86_vendor;     /* CPU vendor */
+       __u8                    x86_model;
+-      __u8                    x86_mask;
++      __u8                    x86_stepping;
+ #ifdef CONFIG_X86_32
+       char                    wp_works_ok;    /* It doesn't on 386's */
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -105,7 +105,7 @@ int amd_cache_northbridges(void)
+       if (boot_cpu_data.x86 == 0x10 &&
+           boot_cpu_data.x86_model >= 0x8 &&
+           (boot_cpu_data.x86_model > 0x9 ||
+-           boot_cpu_data.x86_mask >= 0x1))
++           boot_cpu_data.x86_stepping >= 0x1))
+               amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+       if (boot_cpu_data.x86 == 0x15)
+--- a/arch/x86/kernel/asm-offsets_32.c
++++ b/arch/x86/kernel/asm-offsets_32.c
+@@ -20,7 +20,7 @@ void foo(void)
+       OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
+       OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
+       OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
+-      OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
++      OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
+       OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
+       OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
+       OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -118,7 +118,7 @@ static void init_amd_k6(struct cpuinfo_x
+               return;
+       }
+-      if (c->x86_model == 6 && c->x86_mask == 1) {
++      if (c->x86_model == 6 && c->x86_stepping == 1) {
+               const int K6_BUG_LOOP = 1000000;
+               int n;
+               void (*f_vide)(void);
+@@ -147,7 +147,7 @@ static void init_amd_k6(struct cpuinfo_x
+       /* K6 with old style WHCR */
+       if (c->x86_model < 8 ||
+-         (c->x86_model == 8 && c->x86_mask < 8)) {
++         (c->x86_model == 8 && c->x86_stepping < 8)) {
+               /* We can only write allocate on the low 508Mb */
+               if (mbytes > 508)
+                       mbytes = 508;
+@@ -166,7 +166,7 @@ static void init_amd_k6(struct cpuinfo_x
+               return;
+       }
+-      if ((c->x86_model == 8 && c->x86_mask > 7) ||
++      if ((c->x86_model == 8 && c->x86_stepping > 7) ||
+            c->x86_model == 9 || c->x86_model == 13) {
+               /* The more serious chips .. */
+@@ -219,7 +219,7 @@ static void init_amd_k7(struct cpuinfo_x
+        * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+        * As per AMD technical note 27212 0.2
+        */
+-      if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
++      if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
+               rdmsr(MSR_K7_CLK_CTL, l, h);
+               if ((l & 0xfff00000) != 0x20000000) {
+                       pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+@@ -239,12 +239,12 @@ static void init_amd_k7(struct cpuinfo_x
+        * but they are not certified as MP capable.
+        */
+       /* Athlon 660/661 is valid. */
+-      if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+-          (c->x86_mask == 1)))
++      if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
++          (c->x86_stepping == 1)))
+               return;
+       /* Duron 670 is valid */
+-      if ((c->x86_model == 7) && (c->x86_mask == 0))
++      if ((c->x86_model == 7) && (c->x86_stepping == 0))
+               return;
+       /*
+@@ -254,8 +254,8 @@ static void init_amd_k7(struct cpuinfo_x
+        * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+        * more.
+        */
+-      if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+-          ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
++      if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
++          ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
+            (c->x86_model > 7))
+               if (cpu_has(c, X86_FEATURE_MP))
+                       return;
+@@ -569,7 +569,7 @@ static void early_init_amd(struct cpuinf
+       /*  Set MTRR capability flag if appropriate */
+       if (c->x86 == 5)
+               if (c->x86_model == 13 || c->x86_model == 9 ||
+-                  (c->x86_model == 8 && c->x86_mask >= 8))
++                  (c->x86_model == 8 && c->x86_stepping >= 8))
+                       set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+ #endif
+ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+@@ -834,11 +834,11 @@ static unsigned int amd_size_cache(struc
+       /* AMD errata T13 (order #21922) */
+       if ((c->x86 == 6)) {
+               /* Duron Rev A0 */
+-              if (c->x86_model == 3 && c->x86_mask == 0)
++              if (c->x86_model == 3 && c->x86_stepping == 0)
+                       size = 64;
+               /* Tbird rev A1/A2 */
+               if (c->x86_model == 4 &&
+-                      (c->x86_mask == 0 || c->x86_mask == 1))
++                      (c->x86_stepping == 0 || c->x86_stepping == 1))
+                       size = 256;
+       }
+       return size;
+@@ -975,7 +975,7 @@ static bool cpu_has_amd_erratum(struct c
+       }
+       /* OSVW unavailable or ID unknown, match family-model-stepping range */
+-      ms = (cpu->x86_model << 4) | cpu->x86_mask;
++      ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+       while ((range = *erratum++))
+               if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+                   (ms >= AMD_MODEL_RANGE_START(range)) &&
+--- a/arch/x86/kernel/cpu/centaur.c
++++ b/arch/x86/kernel/cpu/centaur.c
+@@ -134,7 +134,7 @@ static void init_centaur(struct cpuinfo_
+                       clear_cpu_cap(c, X86_FEATURE_TSC);
+                       break;
+               case 8:
+-                      switch (c->x86_mask) {
++                      switch (c->x86_stepping) {
+                       default:
+                       name = "2";
+                               break;
+@@ -209,7 +209,7 @@ centaur_size_cache(struct cpuinfo_x86 *c
+        *  - Note, it seems this may only be in engineering samples.
+        */
+       if ((c->x86 == 6) && (c->x86_model == 9) &&
+-                              (c->x86_mask == 1) && (size == 65))
++                              (c->x86_stepping == 1) && (size == 65))
+               size -= 1;
+       return size;
+ }
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -699,7 +699,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
+               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+               c->x86          = x86_family(tfms);
+               c->x86_model    = x86_model(tfms);
+-              c->x86_mask     = x86_stepping(tfms);
++              c->x86_stepping = x86_stepping(tfms);
+               if (cap0 & (1<<19)) {
+                       c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+@@ -1146,7 +1146,7 @@ static void identify_cpu(struct cpuinfo_
+       c->loops_per_jiffy = loops_per_jiffy;
+       c->x86_cache_size = -1;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+-      c->x86_model = c->x86_mask = 0; /* So far unknown... */
++      c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+       c->x86_model_id[0] = '\0';  /* Unset */
+       c->x86_max_cores = 1;
+@@ -1391,8 +1391,8 @@ void print_cpu_info(struct cpuinfo_x86 *
+       pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+-      if (c->x86_mask || c->cpuid_level >= 0)
+-              pr_cont(", stepping: 0x%x)\n", c->x86_mask);
++      if (c->x86_stepping || c->cpuid_level >= 0)
++              pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
+       else
+               pr_cont(")\n");
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -212,7 +212,7 @@ static void init_cyrix(struct cpuinfo_x8
+       /* common case step number/rev -- exceptions handled below */
+       c->x86_model = (dir1 >> 4) + 1;
+-      c->x86_mask = dir1 & 0xf;
++      c->x86_stepping = dir1 & 0xf;
+       /* Now cook; the original recipe is by Channing Corn, from Cyrix.
+        * We do the same thing for each generation: we work out
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -105,7 +105,7 @@ static bool bad_spectre_microcode(struct
+       for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+               if (c->x86_model == spectre_bad_microcodes[i].model &&
+-                  c->x86_mask == spectre_bad_microcodes[i].stepping)
++                  c->x86_stepping == spectre_bad_microcodes[i].stepping)
+                       return (c->microcode <= spectre_bad_microcodes[i].microcode);
+       }
+       return false;
+@@ -158,7 +158,7 @@ static void early_init_intel(struct cpui
+        * need the microcode to have already been loaded... so if it is
+        * not, recommend a BIOS update and disable large pages.
+        */
+-      if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
++      if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
+           c->microcode < 0x20e) {
+               pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
+               clear_cpu_cap(c, X86_FEATURE_PSE);
+@@ -174,7 +174,7 @@ static void early_init_intel(struct cpui
+       /* CPUID workaround for 0F33/0F34 CPU */
+       if (c->x86 == 0xF && c->x86_model == 0x3
+-          && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
++          && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
+               c->x86_phys_bits = 36;
+       /*
+@@ -289,7 +289,7 @@ int ppro_with_ram_bug(void)
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == 1 &&
+-          boot_cpu_data.x86_mask < 8) {
++          boot_cpu_data.x86_stepping < 8) {
+               pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+               return 1;
+       }
+@@ -306,7 +306,7 @@ static void intel_smp_check(struct cpuin
+        * Mask B, Pentium, but not Pentium MMX
+        */
+       if (c->x86 == 5 &&
+-          c->x86_mask >= 1 && c->x86_mask <= 4 &&
++          c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
+           c->x86_model <= 3) {
+               /*
+                * Remember we have B step Pentia with bugs
+@@ -349,7 +349,7 @@ static void intel_workarounds(struct cpu
+        * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
+        * model 3 mask 3
+        */
+-      if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
++      if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
+               clear_cpu_cap(c, X86_FEATURE_SEP);
+       /*
+@@ -367,7 +367,7 @@ static void intel_workarounds(struct cpu
+        * P4 Xeon erratum 037 workaround.
+        * Hardware prefetcher may cause stale data to be loaded into the cache.
+        */
+-      if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
++      if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
+               if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+                               MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
+                       pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+@@ -382,7 +382,7 @@ static void intel_workarounds(struct cpu
+        * Specification Update").
+        */
+       if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+-          (c->x86_mask < 0x6 || c->x86_mask == 0xb))
++          (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
+               set_cpu_bug(c, X86_BUG_11AP);
+@@ -601,7 +601,7 @@ static void init_intel(struct cpuinfo_x8
+               case 6:
+                       if (l2 == 128)
+                               p = "Celeron (Mendocino)";
+-                      else if (c->x86_mask == 0 || c->x86_mask == 5)
++                      else if (c->x86_stepping == 0 || c->x86_stepping == 5)
+                               p = "Celeron-A";
+                       break;
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -1062,7 +1062,7 @@ static bool is_blacklisted(unsigned int
+        */
+       if (c->x86 == 6 &&
+           c->x86_model == INTEL_FAM6_BROADWELL_X &&
+-          c->x86_mask == 0x01 &&
++          c->x86_stepping == 0x01 &&
+           llc_size_per_core > 2621440 &&
+           c->microcode < 0x0b000021) {
+               pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+@@ -1085,7 +1085,7 @@ static enum ucode_state request_microcod
+               return UCODE_NFOUND;
+       sprintf(name, "intel-ucode/%02x-%02x-%02x",
+-              c->x86, c->x86_model, c->x86_mask);
++              c->x86, c->x86_model, c->x86_stepping);
+       if (request_firmware_direct(&firmware, name, device)) {
+               pr_debug("data file %s load failed\n", name);
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned l
+        */
+       if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == 1 &&
+-          boot_cpu_data.x86_mask <= 7) {
++          boot_cpu_data.x86_stepping <= 7) {
+               if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
+                       pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+                       return -EINVAL;
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -699,8 +699,8 @@ void __init mtrr_bp_init(void)
+                       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+                           boot_cpu_data.x86 == 0xF &&
+                           boot_cpu_data.x86_model == 0x3 &&
+-                          (boot_cpu_data.x86_mask == 0x3 ||
+-                           boot_cpu_data.x86_mask == 0x4))
++                          (boot_cpu_data.x86_stepping == 0x3 ||
++                           boot_cpu_data.x86_stepping == 0x4))
+                               phys_addr = 36;
+                       size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -70,8 +70,8 @@ static int show_cpuinfo(struct seq_file
+                  c->x86_model,
+                  c->x86_model_id[0] ? c->x86_model_id : "unknown");
+-      if (c->x86_mask || c->cpuid_level >= 0)
+-              seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++      if (c->x86_stepping || c->cpuid_level >= 0)
++              seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
+       else
+               seq_puts(m, "stepping\t: unknown\n");
+       if (c->microcode)
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -35,7 +35,7 @@
+ #define X86           new_cpu_data+CPUINFO_x86
+ #define X86_VENDOR    new_cpu_data+CPUINFO_x86_vendor
+ #define X86_MODEL     new_cpu_data+CPUINFO_x86_model
+-#define X86_MASK      new_cpu_data+CPUINFO_x86_mask
++#define X86_STEPPING  new_cpu_data+CPUINFO_x86_stepping
+ #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
+ #define X86_CPUID     new_cpu_data+CPUINFO_cpuid_level
+ #define X86_CAPABILITY        new_cpu_data+CPUINFO_x86_capability
+@@ -441,7 +441,7 @@ enable_paging:
+       shrb $4,%al
+       movb %al,X86_MODEL
+       andb $0x0f,%cl          # mask mask revision
+-      movb %cl,X86_MASK
++      movb %cl,X86_STEPPING
+       movl %edx,X86_CAPABILITY
+ is486:
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -406,7 +406,7 @@ static inline void __init construct_defa
+       processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+       processor.cpuflag = CPU_ENABLED;
+       processor.cpufeature = (boot_cpu_data.x86 << 8) |
+-          (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++          (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
+       processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
+       processor.reserved[0] = 0;
+       processor.reserved[1] = 0;
+--- a/arch/x86/lib/cpu.c
++++ b/arch/x86/lib/cpu.c
+@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
+ {
+       unsigned int fam, model;
+-       fam = x86_family(sig);
++      fam = x86_family(sig);
+       model = (sig >> 4) & 0xf;
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -166,7 +166,7 @@ static int via_rng_init(struct hwrng *rn
+       /* Enable secondary noise source on CPUs where it is present. */
+       /* Nehemiah stepping 8 and higher */
+-      if ((c->x86_model == 9) && (c->x86_mask > 7))
++      if ((c->x86_model == 9) && (c->x86_stepping > 7))
+               lo |= VIA_NOISESRC2;
+       /* Esther */
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -648,7 +648,7 @@ static int acpi_cpufreq_blacklist(struct
+       if (c->x86_vendor == X86_VENDOR_INTEL) {
+               if ((c->x86 == 15) &&
+                   (c->x86_model == 6) &&
+-                  (c->x86_mask == 8)) {
++                  (c->x86_stepping == 8)) {
+                       pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
+                       return -ENODEV;
+                   }
+--- a/drivers/cpufreq/longhaul.c
++++ b/drivers/cpufreq/longhaul.c
+@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpuf
+               break;
+       case 7:
+-              switch (c->x86_mask) {
++              switch (c->x86_stepping) {
+               case 0:
+                       longhaul_version = TYPE_LONGHAUL_V1;
+                       cpu_model = CPU_SAMUEL2;
+@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpuf
+                       break;
+               case 1 ... 15:
+                       longhaul_version = TYPE_LONGHAUL_V2;
+-                      if (c->x86_mask < 8) {
++                      if (c->x86_stepping < 8) {
+                               cpu_model = CPU_SAMUEL2;
+                               cpuname = "C3 'Samuel 2' [C5B]";
+                       } else {
+@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpuf
+               numscales = 32;
+               memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
+               memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
+-              switch (c->x86_mask) {
++              switch (c->x86_stepping) {
+               case 0 ... 1:
+                       cpu_model = CPU_NEHEMIAH;
+                       cpuname = "C3 'Nehemiah A' [C5XLOE]";
+--- a/drivers/cpufreq/p4-clockmod.c
++++ b/drivers/cpufreq/p4-clockmod.c
+@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cp
+ #endif
+       /* Errata workaround */
+-      cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
++      cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
+       switch (cpuid) {
+       case 0x0f07:
+       case 0x0f0a:
+--- a/drivers/cpufreq/powernow-k7.c
++++ b/drivers/cpufreq/powernow-k7.c
+@@ -131,7 +131,7 @@ static int check_powernow(void)
+               return 0;
+       }
+-      if ((c->x86_model == 6) && (c->x86_mask == 0)) {
++      if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
+               pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
+               have_a0 = 1;
+       }
+--- a/drivers/cpufreq/speedstep-centrino.c
++++ b/drivers/cpufreq/speedstep-centrino.c
+@@ -37,7 +37,7 @@ struct cpu_id
+ {
+       __u8    x86;            /* CPU family */
+       __u8    x86_model;      /* model */
+-      __u8    x86_mask;       /* stepping */
++      __u8    x86_stepping;   /* stepping */
+ };
+ enum {
+@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const
+ {
+       if ((c->x86 == x->x86) &&
+           (c->x86_model == x->x86_model) &&
+-          (c->x86_mask == x->x86_mask))
++          (c->x86_stepping == x->x86_stepping))
+               return 1;
+       return 0;
+ }
+--- a/drivers/cpufreq/speedstep-lib.c
++++ b/drivers/cpufreq/speedstep-lib.c
+@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(
+               ebx = cpuid_ebx(0x00000001);
+               ebx &= 0x000000FF;
+-              pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
++              pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
+-              switch (c->x86_mask) {
++              switch (c->x86_stepping) {
+               case 4:
+                       /*
+                        * B-stepping [M-P4-M]
+@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(
+                               msr_lo, msr_hi);
+               if ((msr_hi & (1<<18)) &&
+                   (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
+-                      if (c->x86_mask == 0x01) {
++                      if (c->x86_stepping == 0x01) {
+                               pr_debug("early PIII version\n");
+                               return SPEEDSTEP_CPU_PIII_C_EARLY;
+                       } else
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -531,7 +531,7 @@ static int __init padlock_init(void)
+       printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+-      if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
++      if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
+               ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+               cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+               printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2719,7 +2719,7 @@ static struct amd64_family_type *per_fam
+       struct amd64_family_type *fam_type = NULL;
+       pvt->ext_model  = boot_cpu_data.x86_model >> 4;
+-      pvt->stepping   = boot_cpu_data.x86_mask;
++      pvt->stepping   = boot_cpu_data.x86_stepping;
+       pvt->model      = boot_cpu_data.x86_model;
+       pvt->fam        = boot_cpu_data.x86;
+--- a/drivers/edac/mce_amd.c
++++ b/drivers/edac/mce_amd.c
+@@ -948,7 +948,7 @@ int amd_decode_mce(struct notifier_block
+       pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
+               m->extcpu,
+-              c->x86, c->x86_model, c->x86_mask,
++              c->x86, c->x86_model, c->x86_stepping,
+               m->bank,
+               ((m->status & MCI_STATUS_OVER)  ? "Over"  : "-"),
+               ((m->status & MCI_STATUS_UC)    ? "UE"    :
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x
+       for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
+               const struct tjmax_model *tm = &tjmax_model_table[i];
+               if (c->x86_model == tm->model &&
+-                  (tm->mask == ANY || c->x86_mask == tm->mask))
++                  (tm->mask == ANY || c->x86_stepping == tm->mask))
+                       return tm->tjmax;
+       }
+       /* Early chips have no MSR for TjMax */
+-      if (c->x86_model == 0xf && c->x86_mask < 4)
++      if (c->x86_model == 0xf && c->x86_stepping < 4)
+               usemsr_ee = 0;
+       if (c->x86_model > 0xe && usemsr_ee) {
+@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned in
+        * Readings might stop update when processor visited too deep sleep,
+        * fixed for stepping D0 (6EC).
+        */
+-      if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
++      if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
+               pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
+               return -ENODEV;
+       }
+--- a/drivers/hwmon/hwmon-vid.c
++++ b/drivers/hwmon/hwmon-vid.c
+@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
+       if (c->x86 < 6)         /* Any CPU with family lower than 6 */
+               return 0;       /* doesn't have VID */
+-      vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
++      vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
+       if (vrm_ret == 134)
+               vrm_ret = get_via_model_d_vrm();
+       if (vrm_ret == 0)
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_d
+        * and AM3 formats, but that's the best we can do.
+        */
+       return boot_cpu_data.x86_model < 4 ||
+-             (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
++             (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
+ }
+ static int k10temp_probe(struct pci_dev *pdev,
+--- a/drivers/hwmon/k8temp.c
++++ b/drivers/hwmon/k8temp.c
+@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *
+               return -ENOMEM;
+       model = boot_cpu_data.x86_model;
+-      stepping = boot_cpu_data.x86_mask;
++      stepping = boot_cpu_data.x86_stepping;
+       /* feature available since SH-C0, exclude older revisions */
+       if ((model == 4 && stepping == 0) ||
+--- a/drivers/video/fbdev/geode/video_gx.c
++++ b/drivers/video/fbdev/geode/video_gx.c
+@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_inf
+       int timeout = 1000;
+       /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
+-      if (cpu_data(0).x86_mask == 1) {
++      if (cpu_data(0).x86_stepping == 1) {
+               pll_table = gx_pll_table_14MHz;
+               pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
+       } else {
diff --git a/queue-4.9/x86-spectre-fix-an-error-message.patch b/queue-4.9/x86-spectre-fix-an-error-message.patch
new file mode 100644 (file)
index 0000000..eed3f1b
--- /dev/null
@@ -0,0 +1,42 @@
+From 9de29eac8d2189424d81c0d840cd0469aa3d41c8 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 14 Feb 2018 10:14:17 +0300
+Subject: x86/spectre: Fix an error message
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 9de29eac8d2189424d81c0d840cd0469aa3d41c8 upstream.
+
+If i == ARRAY_SIZE(mitigation_options) then we accidentally print
+garbage from one space beyond the end of the mitigation_options[] array.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: KarimAllah Ahmed <karahmed@amazon.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: kernel-janitors@vger.kernel.org
+Fixes: 9005c6834c0f ("x86/spectre: Simplify spectre_v2 command line parsing")
+Link: http://lkml.kernel.org/r/20180214071416.GA26677@mwanda
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -173,7 +173,7 @@ static enum spectre_v2_mitigation_cmd __
+               }
+               if (i >= ARRAY_SIZE(mitigation_options)) {
+-                      pr_err("unknown option (%s). Switching to AUTO select\n", mitigation_options[i].option);
++                      pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+                       return SPECTRE_V2_CMD_AUTO;
+               }
+       }
diff --git a/queue-4.9/x86-speculation-add-asm-msr-index.h-dependency.patch b/queue-4.9/x86-speculation-add-asm-msr-index.h-dependency.patch
new file mode 100644 (file)
index 0000000..fe2a5b6
--- /dev/null
@@ -0,0 +1,48 @@
+From ea00f301285ea2f07393678cd2b6057878320c9d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 13 Feb 2018 14:28:19 +0100
+Subject: x86/speculation: Add <asm/msr-index.h> dependency
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit ea00f301285ea2f07393678cd2b6057878320c9d upstream.
+
+Joe Konno reported a compile failure resulting from using an MSR
+without inclusion of <asm/msr-index.h>, and while the current code builds
+fine (by accident) this needs fixing for future patches.
+
+Reported-by: Joe Konno <joe.konno@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan@linux.intel.com
+Cc: bp@alien8.de
+Cc: dan.j.williams@intel.com
+Cc: dave.hansen@linux.intel.com
+Cc: dwmw2@infradead.org
+Cc: dwmw@amazon.co.uk
+Cc: gregkh@linuxfoundation.org
+Cc: hpa@zytor.com
+Cc: jpoimboe@redhat.com
+Cc: linux-tip-commits@vger.kernel.org
+Cc: luto@kernel.org
+Fixes: 20ffa1caecca ("x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support")
+Link: http://lkml.kernel.org/r/20180213132819.GJ25201@hirez.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -6,6 +6,7 @@
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
++#include <asm/msr-index.h>
+ #ifdef __ASSEMBLY__
diff --git a/queue-4.9/x86-speculation-fix-up-array_index_nospec_mask-asm-constraint.patch b/queue-4.9/x86-speculation-fix-up-array_index_nospec_mask-asm-constraint.patch
new file mode 100644 (file)
index 0000000..91ac57f
--- /dev/null
@@ -0,0 +1,36 @@
+From be3233fbfcb8f5acb6e3bcd0895c3ef9e100d470 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 6 Feb 2018 18:22:40 -0800
+Subject: x86/speculation: Fix up array_index_nospec_mask() asm constraint
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit be3233fbfcb8f5acb6e3bcd0895c3ef9e100d470 upstream.
+
+Allow the compiler to handle @size as an immediate value or memory
+directly rather than allocating a register.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/151797010204.1289.1510000292250184993.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/barrier.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -39,7 +39,7 @@ static inline unsigned long array_index_
+       asm ("cmp %1,%2; sbb %0,%0;"
+                       :"=r" (mask)
+-                      :"r"(size),"r" (index)
++                      :"g"(size),"r" (index)
+                       :"cc");
+       return mask;
+ }