4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Jun 2018 16:47:07 +0000 (18:47 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Jun 2018 16:47:07 +0000 (18:47 +0200)
added patches:
x86-cpufeature-remove-cpu_has_arch_perfmon.patch
x86-cpufeature-remove-cpu_has_clflush.patch
x86-cpufeature-remove-cpu_has_gbpages.patch
x86-cpufeature-remove-cpu_has_osxsave.patch
x86-cpufeature-remove-cpu_has_pse.patch
x86-cpufeature-remove-cpu_has_x2apic.patch
x86-cpufeature-remove-cpu_has_xmm2.patch
x86-cpufeature-remove-unused-and-seldomly-used-cpu_has_xx-macros.patch
x86-cpufeature-replace-cpu_has_aes-with-boot_cpu_has-usage.patch
x86-cpufeature-replace-cpu_has_avx2-with-boot_cpu_has-usage.patch
x86-fpu-default-eagerfpu-on-on-all-cpus.patch
x86-fpu-disable-avx-when-eagerfpu-is-off.patch
x86-fpu-disable-mpx-when-eagerfpu-is-off.patch
x86-fpu-fix-eager-fpu-handling-on-legacy-fpu-machines.patch
x86-fpu-fix-early-fpu-command-line-parsing.patch
x86-fpu-fix-no387-regression.patch
x86-fpu-hard-disable-lazy-fpu-mode.patch
x86-fpu-revert-x86-fpu-disable-avx-when-eagerfpu-is-off.patch
x86-mm-pat-x86-cpufeature-remove-cpu_has_pat.patch
x86-remove-unused-function-cpu_has_ht_siblings.patch

20 files changed:
queue-4.4/x86-cpufeature-remove-cpu_has_arch_perfmon.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_clflush.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_gbpages.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_osxsave.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_pse.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_x2apic.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-cpu_has_xmm2.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-remove-unused-and-seldomly-used-cpu_has_xx-macros.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-replace-cpu_has_aes-with-boot_cpu_has-usage.patch [new file with mode: 0644]
queue-4.4/x86-cpufeature-replace-cpu_has_avx2-with-boot_cpu_has-usage.patch [new file with mode: 0644]
queue-4.4/x86-fpu-default-eagerfpu-on-on-all-cpus.patch [new file with mode: 0644]
queue-4.4/x86-fpu-disable-avx-when-eagerfpu-is-off.patch [new file with mode: 0644]
queue-4.4/x86-fpu-disable-mpx-when-eagerfpu-is-off.patch [new file with mode: 0644]
queue-4.4/x86-fpu-fix-eager-fpu-handling-on-legacy-fpu-machines.patch [new file with mode: 0644]
queue-4.4/x86-fpu-fix-early-fpu-command-line-parsing.patch [new file with mode: 0644]
queue-4.4/x86-fpu-fix-no387-regression.patch [new file with mode: 0644]
queue-4.4/x86-fpu-hard-disable-lazy-fpu-mode.patch [new file with mode: 0644]
queue-4.4/x86-fpu-revert-x86-fpu-disable-avx-when-eagerfpu-is-off.patch [new file with mode: 0644]
queue-4.4/x86-mm-pat-x86-cpufeature-remove-cpu_has_pat.patch [new file with mode: 0644]
queue-4.4/x86-remove-unused-function-cpu_has_ht_siblings.patch [new file with mode: 0644]

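Every patch in this queue applies the same mechanical conversion: a cpu_has_<feature> convenience macro is deleted from <asm/cpufeature.h> and each call site queries the capability bit explicitly. A minimal sketch of the before/after shape (my_driver_init() is a hypothetical example, not code from these patches):

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/cpufeature.h>     /* boot_cpu_has(), X86_FEATURE_* */

    static int __init my_driver_init(void)
    {
            /* old style, now removed:  if (!cpu_has_gbpages) ...    */
            /* new style: name the feature bit at the call site      */
            if (!boot_cpu_has(X86_FEATURE_GBPAGES))
                    return -ENODEV;

            return 0;
    }

Spelling the X86_FEATURE_* bit out at each call site also makes feature usage greppable, which is what allows the macro block in cpufeature.h to shrink patch by patch below.
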
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_arch_perfmon.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_arch_perfmon.patch
new file mode 100644 (file)
index 0000000..e5430b7
--- /dev/null
@@ -0,0 +1,67 @@
+From 7b5e74e637e4a977c7cf40fd7de332f60b68180e Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:41:54 +0200
+Subject: x86/cpufeature: Remove cpu_has_arch_perfmon
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 7b5e74e637e4a977c7cf40fd7de332f60b68180e upstream.
+
+Use boot_cpu_has() instead.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: oprofile-list@lists.sf.net
+Link: http://lkml.kernel.org/r/1459266123-21878-2-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h |    1 -
+ arch/x86/oprofile/nmi_int.c       |    4 ++--
+ arch/x86/oprofile/op_model_ppro.c |    2 +-
+ 3 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -380,7 +380,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+ #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+ #define cpu_has_gbpages               boot_cpu_has(X86_FEATURE_GBPAGES)
+-#define cpu_has_arch_perfmon  boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+ #define cpu_has_pat           boot_cpu_has(X86_FEATURE_PAT)
+ #define cpu_has_x2apic                boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -635,7 +635,7 @@ static int __init ppro_init(char **cpu_t
+       __u8 cpu_model = boot_cpu_data.x86_model;
+       struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
+-      if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
++      if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
+               return 0;
+       /*
+@@ -760,7 +760,7 @@ int __init op_nmi_init(struct oprofile_o
+               if (cpu_type)
+                       break;
+-              if (!cpu_has_arch_perfmon)
++              if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
+                       return -ENODEV;
+               /* use arch perfmon as fallback */
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -75,7 +75,7 @@ static void ppro_setup_ctrs(struct op_x8
+       u64 val;
+       int i;
+-      if (cpu_has_arch_perfmon) {
++      if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+               union cpuid10_eax eax;
+               eax.full = cpuid_eax(0xa);
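
For context: X86_FEATURE_ARCH_PERFMON means CPUID leaf 0xa describes the PMU architecturally, which is why the oprofile hunk above follows the check with cpuid_eax(0xa). A sketch of that probe pattern (pmu_num_counters() is a hypothetical helper; union cpuid10_eax is the real layout from <asm/perf_event.h>):

    #include <asm/cpufeature.h>
    #include <asm/perf_event.h>     /* union cpuid10_eax */
    #include <asm/processor.h>      /* cpuid_eax() */

    static int pmu_num_counters(void)
    {
            union cpuid10_eax eax;

            if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
                    return 0;       /* leaf 0xa is not meaningful here */

            eax.full = cpuid_eax(0xa);
            return eax.split.num_counters;  /* general-purpose counters */
    }
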
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_clflush.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_clflush.patch
new file mode 100644 (file)
index 0000000..cfccb06
--- /dev/null
@@ -0,0 +1,113 @@
+From 906bf7fda2c9cf5c1762ec607943ed54b6c5b203 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:41:59 +0200
+Subject: x86/cpufeature: Remove cpu_has_clflush
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 906bf7fda2c9cf5c1762ec607943ed54b6c5b203 upstream.
+
+Use the fast variant in the DRM code.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dri-devel@lists.freedesktop.org
+Cc: intel-gfx@lists.freedesktop.org
+Link: http://lkml.kernel.org/r/1459266123-21878-7-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h          |    1 -
+ arch/x86/kernel/cpu/intel.c                |    2 +-
+ arch/x86/kernel/tce_64.c                   |    2 +-
+ arch/x86/mm/pageattr.c                     |    2 +-
+ drivers/gpu/drm/drm_cache.c                |    6 +++---
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |    2 +-
+ 6 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -378,7 +378,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_aes           boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+-#define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -455,7 +455,7 @@ static void init_intel(struct cpuinfo_x8
+                       set_cpu_cap(c, X86_FEATURE_PEBS);
+       }
+-      if (c->x86 == 6 && cpu_has_clflush &&
++      if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
+           (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+               set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
+--- a/arch/x86/kernel/tce_64.c
++++ b/arch/x86/kernel/tce_64.c
+@@ -40,7 +40,7 @@
+ static inline void flush_tce(void* tceaddr)
+ {
+       /* a single tce can't cross a cache line */
+-      if (cpu_has_clflush)
++      if (boot_cpu_has(X86_FEATURE_CLFLUSH))
+               clflush(tceaddr);
+       else
+               wbinvd();
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -1481,7 +1481,7 @@ static int change_page_attr_set_clr(unsi
+        * error case we fall back to cpa_flush_all (which uses
+        * WBINVD):
+        */
+-      if (!ret && cpu_has_clflush) {
++      if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
+               if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+                       cpa_flush_array(addr, numpages, cache,
+                                       cpa.flags, pages);
+--- a/drivers/gpu/drm/drm_cache.c
++++ b/drivers/gpu/drm/drm_cache.c
+@@ -72,7 +72,7 @@ drm_clflush_pages(struct page *pages[],
+ {
+ #if defined(CONFIG_X86)
+-      if (cpu_has_clflush) {
++      if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               drm_cache_flush_clflush(pages, num_pages);
+               return;
+       }
+@@ -105,7 +105,7 @@ void
+ drm_clflush_sg(struct sg_table *st)
+ {
+ #if defined(CONFIG_X86)
+-      if (cpu_has_clflush) {
++      if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               struct sg_page_iter sg_iter;
+               mb();
+@@ -129,7 +129,7 @@ void
+ drm_clflush_virt_range(void *addr, unsigned long length)
+ {
+ #if defined(CONFIG_X86)
+-      if (cpu_has_clflush) {
++      if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               const int size = boot_cpu_data.x86_clflush_size;
+               void *end = addr + length;
+               addr = (void *)(((unsigned long)addr) & -size);
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -466,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struc
+               ret = relocate_entry_cpu(obj, reloc, target_offset);
+       else if (obj->map_and_fenceable)
+               ret = relocate_entry_gtt(obj, reloc, target_offset);
+-      else if (cpu_has_clflush)
++      else if (static_cpu_has(X86_FEATURE_CLFLUSH))
+               ret = relocate_entry_clflush(obj, reloc, target_offset);
+       else {
+               WARN_ONCE(1, "Impossible case in relocation handling\n");
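
Note the asymmetry in this patch: the arch/x86 call sites take boot_cpu_has(), but the DRM flush routines, which run per object on hot paths, take static_cpu_has(). The latter is patched via asm alternatives during boot, so after patching the test costs roughly a direct jump rather than a memory load. A sketch of the idiom (flush_range() is illustrative; clflush_cache_range() is the real 4.4-era helper):

    #include <asm/cpufeature.h>
    #include <asm/cacheflush.h>     /* clflush_cache_range() */
    #include <asm/special_insns.h>  /* wbinvd() */

    static void flush_range(void *addr, unsigned int len)
    {
            /* hot path: alternatives-patched test */
            if (static_cpu_has(X86_FEATURE_CLFLUSH))
                    clflush_cache_range(addr, len);
            else
                    wbinvd();       /* heavyweight whole-cache fallback */
    }
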
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_gbpages.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_gbpages.patch
new file mode 100644 (file)
index 0000000..4c079b2
--- /dev/null
@@ -0,0 +1,89 @@
+From b8291adc191abec2095f03a130ac91506d345cae Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:41:58 +0200
+Subject: x86/cpufeature: Remove cpu_has_gbpages
+
+From: Borislav Petkov <bp@suse.de>
+
+commit b8291adc191abec2095f03a130ac91506d345cae upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1459266123-21878-6-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h |    1 -
+ arch/x86/kvm/mmu.c                |    3 ++-
+ arch/x86/mm/hugetlbpage.c         |    4 ++--
+ arch/x86/mm/init.c                |    2 +-
+ arch/x86/mm/ioremap.c             |    2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -379,7 +379,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+ #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+-#define cpu_has_gbpages               boot_cpu_has(X86_FEATURE_GBPAGES)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3789,7 +3789,8 @@ reset_tdp_shadow_zero_bits_mask(struct k
+               __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+                                       boot_cpu_data.x86_phys_bits,
+                                       context->shadow_root_level, false,
+-                                      cpu_has_gbpages, true, true);
++                                      boot_cpu_has(X86_FEATURE_GBPAGES),
++                                      true, true);
+       else
+               __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+                                           boot_cpu_data.x86_phys_bits,
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -162,7 +162,7 @@ static __init int setup_hugepagesz(char
+       unsigned long ps = memparse(opt, &opt);
+       if (ps == PMD_SIZE) {
+               hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+-      } else if (ps == PUD_SIZE && cpu_has_gbpages) {
++      } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
+               hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       } else {
+               printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
+@@ -177,7 +177,7 @@ __setup("hugepagesz=", setup_hugepagesz)
+ static __init int gigantic_pages_init(void)
+ {
+       /* With CMA we can allocate gigantic pages at runtime */
+-      if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
++      if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
+               hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       return 0;
+ }
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -172,7 +172,7 @@ static void __init probe_page_size_mask(
+               __supported_pte_mask &= ~_PAGE_GLOBAL;
+       /* Enable 1 GB linear kernel mappings if available: */
+-      if (direct_gbpages && cpu_has_gbpages) {
++      if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
+               printk(KERN_INFO "Using GB pages for direct mapping\n");
+               page_size_mask |= 1 << PG_LEVEL_1G;
+       } else {
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -378,7 +378,7 @@ EXPORT_SYMBOL(iounmap);
+ int __init arch_ioremap_pud_supported(void)
+ {
+ #ifdef CONFIG_X86_64
+-      return cpu_has_gbpages;
++      return boot_cpu_has(X86_FEATURE_GBPAGES);
+ #else
+       return 0;
+ #endif
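
X86_FEATURE_GBPAGES itself is populated from CPUID leaf 0x80000001, EDX bit 26 (PDPE1GB). A direct probe equivalent to the cached flag, for illustration only:

    #include <linux/types.h>
    #include <asm/processor.h>      /* cpuid_edx() */

    static bool has_1g_pages(void)
    {
            /* CPUID.80000001H:EDX[26] == PDPE1GB (1 GB pages supported) */
            return !!(cpuid_edx(0x80000001) & (1 << 26));
    }

In practice code keeps using boot_cpu_has(X86_FEATURE_GBPAGES); the flag is filled from exactly this leaf during early CPU identification.
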
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_osxsave.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_osxsave.patch
new file mode 100644 (file)
index 0000000..7244671
--- /dev/null
@@ -0,0 +1,89 @@
+From ab4a56fa2c6ce9384ca077b6570c56fe18361f17 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:41:56 +0200
+Subject: x86/cpufeature: Remove cpu_has_osxsave
+
+From: Borislav Petkov <bp@suse.de>
+
+commit ab4a56fa2c6ce9384ca077b6570c56fe18361f17 upstream.
+
+Use boot_cpu_has() instead.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/1459266123-21878-4-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/camellia_aesni_avx2_glue.c |    3 ++-
+ arch/x86/crypto/camellia_aesni_avx_glue.c  |    2 +-
+ arch/x86/crypto/serpent_avx2_glue.c        |    2 +-
+ arch/x86/include/asm/cpufeature.h          |    1 -
+ arch/x86/include/asm/xor_avx.h             |    4 ++--
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -562,7 +562,8 @@ static int __init camellia_aesni_init(vo
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
++      if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes ||
++          !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
+@@ -554,7 +554,7 @@ static int __init camellia_aesni_init(vo
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
++      if (!cpu_has_avx || !cpu_has_aes || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/crypto/serpent_avx2_glue.c
++++ b/arch/x86/crypto/serpent_avx2_glue.c
+@@ -538,7 +538,7 @@ static int __init init(void)
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx2 || !cpu_has_osxsave) {
++      if (!cpu_has_avx2 || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -383,7 +383,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_x2apic                boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+-#define cpu_has_osxsave               boot_cpu_has(X86_FEATURE_OSXSAVE)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
+ /*
+  * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+--- a/arch/x86/include/asm/xor_avx.h
++++ b/arch/x86/include/asm/xor_avx.h
+@@ -167,12 +167,12 @@ static struct xor_block_template xor_blo
+ #define AVX_XOR_SPEED \
+ do { \
+-      if (cpu_has_avx && cpu_has_osxsave) \
++      if (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
+               xor_speed(&xor_block_avx); \
+ } while (0)
+ #define AVX_SELECT(FASTEST) \
+-      (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
++      (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
+ #else
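
OSXSAVE is worth a note because it is an OS-controlled bit: the CPU sets CPUID.01H:ECX[27] only after the kernel has enabled XSAVE through CR4.OSXSAVE. Checking it in the crypto glue therefore verifies that extended register state (YMM etc.) will actually be context-switched, not merely that the instructions exist. A direct probe equivalent to the flag (illustrative):

    #include <linux/types.h>
    #include <asm/processor.h>      /* cpuid_ecx() */

    static bool os_enabled_xsave(void)
    {
            /* CPUID.01H:ECX[27] == OSXSAVE, reflects CR4.OSXSAVE */
            return !!(cpuid_ecx(1) & (1 << 27));
    }
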
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_pse.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_pse.patch
new file mode 100644 (file)
index 0000000..da4f8e5
--- /dev/null
@@ -0,0 +1,142 @@
+From 16bf92261b1b6cb1a1c0671b445a2fcb5a1ecc96 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:42:03 +0200
+Subject: x86/cpufeature: Remove cpu_has_pse
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 16bf92261b1b6cb1a1c0671b445a2fcb5a1ecc96 upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1459266123-21878-11-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h |    1 -
+ arch/x86/include/asm/hugetlb.h    |    2 +-
+ arch/x86/include/asm/pgtable.h    |    2 +-
+ arch/x86/mm/init.c                |    4 ++--
+ arch/x86/mm/init_32.c             |    2 +-
+ arch/x86/mm/init_64.c             |    4 ++--
+ arch/x86/mm/ioremap.c             |    2 +-
+ arch/x86/power/hibernate_32.c     |    2 +-
+ arch/x86/xen/enlighten.c          |    2 +-
+ 9 files changed, 10 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -368,7 +368,6 @@ extern const char * const x86_bug_flags[
+ #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+ #define cpu_has_fpu           boot_cpu_has(X86_FEATURE_FPU)
+-#define cpu_has_pse           boot_cpu_has(X86_FEATURE_PSE)
+ #define cpu_has_tsc           boot_cpu_has(X86_FEATURE_TSC)
+ #define cpu_has_pge           boot_cpu_has(X86_FEATURE_PGE)
+ #define cpu_has_apic          boot_cpu_has(X86_FEATURE_APIC)
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,7 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+-#define hugepages_supported() cpu_has_pse
++#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -183,7 +183,7 @@ static inline int pmd_trans_huge(pmd_t p
+ static inline int has_transparent_hugepage(void)
+ {
+-      return cpu_has_pse;
++      return boot_cpu_has(X86_FEATURE_PSE);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -156,12 +156,12 @@ static void __init probe_page_size_mask(
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+-      if (cpu_has_pse)
++      if (boot_cpu_has(X86_FEATURE_PSE))
+               page_size_mask |= 1 << PG_LEVEL_2M;
+ #endif
+       /* Enable PSE if available */
+-      if (cpu_has_pse)
++      if (boot_cpu_has(X86_FEATURE_PSE))
+               cr4_set_bits_and_update_boot(X86_CR4_PSE);
+       /* Enable PGE if available */
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -284,7 +284,7 @@ kernel_physical_mapping_init(unsigned lo
+        */
+       mapping_iter = 1;
+-      if (!cpu_has_pse)
++      if (!boot_cpu_has(X86_FEATURE_PSE))
+               use_pse = 0;
+ repeat:
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1306,7 +1306,7 @@ int __meminit vmemmap_populate(unsigned
+ {
+       int err;
+-      if (cpu_has_pse)
++      if (boot_cpu_has(X86_FEATURE_PSE))
+               err = vmemmap_populate_hugepages(start, end, node);
+       else
+               err = vmemmap_populate_basepages(start, end, node);
+@@ -1345,7 +1345,7 @@ void register_page_bootmem_memmap(unsign
+               }
+               get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
+-              if (!cpu_has_pse) {
++              if (!boot_cpu_has(X86_FEATURE_PSE)) {
+                       next = (addr + PAGE_SIZE) & PAGE_MASK;
+                       pmd = pmd_offset(pud, addr);
+                       if (pmd_none(*pmd))
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -386,7 +386,7 @@ int __init arch_ioremap_pud_supported(vo
+ int __init arch_ioremap_pmd_supported(void)
+ {
+-      return cpu_has_pse;
++      return boot_cpu_has(X86_FEATURE_PSE);
+ }
+ /*
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -106,7 +106,7 @@ static int resume_physical_mapping_init(
+                        * normal page tables.
+                        * NOTE: We can mark everything as executable here
+                        */
+-                      if (cpu_has_pse) {
++                      if (boot_cpu_has(X86_FEATURE_PSE)) {
+                               set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+                               pfn += PTRS_PER_PTE;
+                       } else {
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1482,7 +1482,7 @@ static void xen_pvh_set_cr_flags(int cpu
+        * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
+        * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
+       */
+-      if (cpu_has_pse)
++      if (boot_cpu_has(X86_FEATURE_PSE))
+               cr4_set_bits_and_update_boot(X86_CR4_PSE);
+       if (cpu_has_pge)
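
All of the PSE conversions gate the same thing: whether leaf (large) mappings may be created at the 2M/4M level. A sketch condensing the probe_page_size_mask()-style logic from the init.c and xen hunks above (probe_large_pages() is hypothetical):

    #include <asm/cpufeature.h>
    #include <asm/pgtable_types.h>  /* PG_LEVEL_2M */
    #include <asm/tlbflush.h>       /* cr4_set_bits_and_update_boot() */

    static unsigned int probe_large_pages(void)
    {
            unsigned int page_size_mask = 0;

            if (boot_cpu_has(X86_FEATURE_PSE)) {
                    page_size_mask |= 1 << PG_LEVEL_2M;
                    /* enable 4M/2M pages in CR4 and remember the
                     * setting for the boot-time CR4 image */
                    cr4_set_bits_and_update_boot(X86_CR4_PSE);
            }
            return page_size_mask;
    }
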
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_x2apic.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_x2apic.patch
new file mode 100644 (file)
index 0000000..ba67973
--- /dev/null
@@ -0,0 +1,71 @@
+From 62436a4d36c94d202784cd8a997ff8bb4b880237 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:41:57 +0200
+Subject: x86/cpufeature: Remove cpu_has_x2apic
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 62436a4d36c94d202784cd8a997ff8bb4b880237 upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Tony Luck <tony.luck@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1459266123-21878-5-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/ia64/include/asm/iommu.h     |    1 -
+ arch/x86/include/asm/apic.h       |    4 ++--
+ arch/x86/include/asm/cpufeature.h |    1 -
+ arch/x86/kernel/apic/apic.c       |    2 +-
+ 4 files changed, 3 insertions(+), 5 deletions(-)
+
+--- a/arch/ia64/include/asm/iommu.h
++++ b/arch/ia64/include/asm/iommu.h
+@@ -1,7 +1,6 @@
+ #ifndef _ASM_IA64_IOMMU_H
+ #define _ASM_IA64_IOMMU_H 1
+-#define cpu_has_x2apic 0
+ /* 10 seconds */
+ #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -235,10 +235,10 @@ extern void __init check_x2apic(void);
+ extern void x2apic_setup(void);
+ static inline int x2apic_enabled(void)
+ {
+-      return cpu_has_x2apic && apic_is_x2apic_enabled();
++      return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled();
+ }
+-#define x2apic_supported()    (cpu_has_x2apic)
++#define x2apic_supported()    (boot_cpu_has(X86_FEATURE_X2APIC))
+ #else /* !CONFIG_X86_X2APIC */
+ static inline void check_x2apic(void) { }
+ static inline void x2apic_setup(void) { }
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -380,7 +380,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+ #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+ #define cpu_has_gbpages               boot_cpu_has(X86_FEATURE_GBPAGES)
+-#define cpu_has_x2apic                boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1551,7 +1551,7 @@ void __init check_x2apic(void)
+               pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
+               x2apic_mode = 1;
+               x2apic_state = X2APIC_ON;
+-      } else if (!cpu_has_x2apic) {
++      } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
+               x2apic_state = X2APIC_DISABLED;
+       }
+ }
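
x2apic_enabled() above combines two independent facts: the CPU advertises x2APIC (CPUID), and firmware or the kernel actually switched the APIC into x2APIC mode (IA32_APIC_BASE MSR, bit 10). Sketch of the MSR half, assuming the 4.4-era X2APIC_ENABLE constant from <asm/apic.h>:

    #include <linux/types.h>
    #include <asm/apic.h>           /* X2APIC_ENABLE */
    #include <asm/msr.h>            /* rdmsrl(), MSR_IA32_APICBASE */
    #include <asm/cpufeature.h>

    static bool x2apic_mode_active(void)   /* illustrative */
    {
            u64 msr;

            if (!boot_cpu_has(X86_FEATURE_X2APIC))
                    return false;

            rdmsrl(MSR_IA32_APICBASE, msr);
            return !!(msr & X2APIC_ENABLE); /* bit 10: x2APIC mode */
    }
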
diff --git a/queue-4.4/x86-cpufeature-remove-cpu_has_xmm2.patch b/queue-4.4/x86-cpufeature-remove-cpu_has_xmm2.patch
new file mode 100644 (file)
index 0000000..dd8fd24
--- /dev/null
@@ -0,0 +1,101 @@
+From 054efb6467f84490bdf92afab6d9dbd5102e620a Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:42:00 +0200
+Subject: x86/cpufeature: Remove cpu_has_xmm2
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 054efb6467f84490bdf92afab6d9dbd5102e620a upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/1459266123-21878-8-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/poly1305_glue.c     |    2 +-
+ arch/x86/crypto/serpent_sse2_glue.c |    2 +-
+ arch/x86/include/asm/cpufeature.h   |    1 -
+ arch/x86/kernel/cpu/amd.c           |    2 +-
+ arch/x86/kernel/cpu/intel.c         |    2 +-
+ arch/x86/lib/usercopy_32.c          |    4 ++--
+ 6 files changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/crypto/poly1305_glue.c
++++ b/arch/x86/crypto/poly1305_glue.c
+@@ -178,7 +178,7 @@ static struct shash_alg alg = {
+ static int __init poly1305_simd_mod_init(void)
+ {
+-      if (!cpu_has_xmm2)
++      if (!boot_cpu_has(X86_FEATURE_XMM2))
+               return -ENODEV;
+ #ifdef CONFIG_AS_AVX2
+--- a/arch/x86/crypto/serpent_sse2_glue.c
++++ b/arch/x86/crypto/serpent_sse2_glue.c
+@@ -605,7 +605,7 @@ static struct crypto_alg serpent_algs[10
+ static int __init serpent_sse2_init(void)
+ {
+-      if (!cpu_has_xmm2) {
++      if (!boot_cpu_has(X86_FEATURE_XMM2)) {
+               printk(KERN_INFO "SSE2 instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -374,7 +374,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_apic          boot_cpu_has(X86_FEATURE_APIC)
+ #define cpu_has_fxsr          boot_cpu_has(X86_FEATURE_FXSR)
+ #define cpu_has_xmm           boot_cpu_has(X86_FEATURE_XMM)
+-#define cpu_has_xmm2          boot_cpu_has(X86_FEATURE_XMM2)
+ #define cpu_has_aes           boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -745,7 +745,7 @@ static void init_amd(struct cpuinfo_x86
+       if (c->x86 >= 0xf)
+               set_cpu_cap(c, X86_FEATURE_K8);
+-      if (cpu_has_xmm2) {
++      if (cpu_has(c, X86_FEATURE_XMM2)) {
+               unsigned long long val;
+               int ret;
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -443,7 +443,7 @@ static void init_intel(struct cpuinfo_x8
+                       set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+       }
+-      if (cpu_has_xmm2)
++      if (cpu_has(c, X86_FEATURE_XMM2))
+               set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+       if (boot_cpu_has(X86_FEATURE_DS)) {
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -612,7 +612,7 @@ unsigned long __copy_from_user_ll_nocach
+ {
+       stac();
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+-      if (n > 64 && cpu_has_xmm2)
++      if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+               n = __copy_user_zeroing_intel_nocache(to, from, n);
+       else
+               __copy_user_zeroing(to, from, n);
+@@ -629,7 +629,7 @@ unsigned long __copy_from_user_ll_nocach
+ {
+       stac();
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+-      if (n > 64 && cpu_has_xmm2)
++      if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+               n = __copy_user_intel_nocache(to, from, n);
+       else
+               __copy_user(to, from, n);
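
This patch is a good single-file summary of the three check flavors the whole series converges on; which one is right depends on which CPU is being asked about and how hot the path is. Sketch (feature_check_flavors() is illustrative only):

    #include <linux/printk.h>
    #include <asm/cpufeature.h>
    #include <asm/processor.h>      /* struct cpuinfo_x86 */

    static void feature_check_flavors(struct cpuinfo_x86 *c)
    {
            /* boot CPU's bits: module init, setup, slow paths */
            bool boot = boot_cpu_has(X86_FEATURE_XMM2);

            /* a specific CPU's own bits: per-CPU init such as init_amd() */
            bool cpu = cpu_has(c, X86_FEATURE_XMM2);

            /* alternatives-patched: hot paths such as the usercopy hunks */
            bool fast = static_cpu_has(X86_FEATURE_XMM2);

            pr_debug("xmm2: boot=%d cpu=%d fast=%d\n", boot, cpu, fast);
    }
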
diff --git a/queue-4.4/x86-cpufeature-remove-unused-and-seldomly-used-cpu_has_xx-macros.patch b/queue-4.4/x86-cpufeature-remove-unused-and-seldomly-used-cpu_has_xx-macros.patch
new file mode 100644 (file)
index 0000000..d6e03ed
--- /dev/null
@@ -0,0 +1,478 @@
+From 362f924b64ba0f4be2ee0cb697690c33d40be721 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 7 Dec 2015 10:39:41 +0100
+Subject: x86/cpufeature: Remove unused and seldomly used cpu_has_xx macros
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 362f924b64ba0f4be2ee0cb697690c33d40be721 upstream.
+
+Those are stupid and code should use static_cpu_has_safe() or
+boot_cpu_has() instead. Kill the least used and unused ones.
+
+The remaining ones need more careful inspection before a conversion can
+happen. On the TODO.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: http://lkml.kernel.org/r/1449481182-27541-4-git-send-email-bp@alien8.de
+Cc: David Sterba <dsterba@suse.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Chris Mason <clm@fb.com>
+Cc: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/chacha20_glue.c             |    2 -
+ arch/x86/crypto/crc32c-intel_glue.c         |    2 -
+ arch/x86/include/asm/cmpxchg_32.h           |    2 -
+ arch/x86/include/asm/cmpxchg_64.h           |    2 -
+ arch/x86/include/asm/cpufeature.h           |   37 +++-------------------------
+ arch/x86/include/asm/xor_32.h               |    2 -
+ arch/x86/kernel/cpu/amd.c                   |    4 +--
+ arch/x86/kernel/cpu/common.c                |    4 ++-
+ arch/x86/kernel/cpu/intel.c                 |    3 +-
+ arch/x86/kernel/cpu/intel_cacheinfo.c       |    6 ++--
+ arch/x86/kernel/cpu/mtrr/generic.c          |    2 -
+ arch/x86/kernel/cpu/mtrr/main.c             |    2 -
+ arch/x86/kernel/cpu/perf_event_amd.c        |    4 +--
+ arch/x86/kernel/cpu/perf_event_amd_uncore.c |   11 ++++----
+ arch/x86/kernel/fpu/init.c                  |    4 +--
+ arch/x86/kernel/hw_breakpoint.c             |    6 +++-
+ arch/x86/kernel/smpboot.c                   |    2 -
+ arch/x86/kernel/vm86_32.c                   |    4 ++-
+ arch/x86/mm/setup_nx.c                      |    4 +--
+ drivers/char/hw_random/via-rng.c            |    5 ++-
+ drivers/crypto/padlock-aes.c                |    2 -
+ drivers/crypto/padlock-sha.c                |    2 -
+ drivers/iommu/intel_irq_remapping.c         |    2 -
+ fs/btrfs/disk-io.c                          |    2 -
+ 24 files changed, 48 insertions(+), 68 deletions(-)
+
+--- a/arch/x86/crypto/chacha20_glue.c
++++ b/arch/x86/crypto/chacha20_glue.c
+@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
+ static int __init chacha20_simd_mod_init(void)
+ {
+-      if (!cpu_has_ssse3)
++      if (!boot_cpu_has(X86_FEATURE_SSSE3))
+               return -ENODEV;
+ #ifdef CONFIG_AS_AVX2
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(
+       if (!x86_match_cpu(crc32c_cpu_id))
+               return -ENODEV;
+ #ifdef CONFIG_X86_64
+-      if (cpu_has_pclmulqdq) {
++      if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
+               alg.update = crc32c_pcl_intel_update;
+               alg.finup = crc32c_pcl_intel_finup;
+               alg.digest = crc32c_pcl_intel_digest;
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(vola
+ #endif
+-#define system_has_cmpxchg_double() cpu_has_cx8
++#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
+ #endif /* _ASM_X86_CMPXCHG_32_H */
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u6
+       cmpxchg_local((ptr), (o), (n));                                 \
+ })
+-#define system_has_cmpxchg_double() cpu_has_cx16
++#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
+ #endif /* _ASM_X86_CMPXCHG_64_H */
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -368,58 +368,29 @@ extern const char * const x86_bug_flags[
+ #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+ #define cpu_has_fpu           boot_cpu_has(X86_FEATURE_FPU)
+-#define cpu_has_de            boot_cpu_has(X86_FEATURE_DE)
+ #define cpu_has_pse           boot_cpu_has(X86_FEATURE_PSE)
+ #define cpu_has_tsc           boot_cpu_has(X86_FEATURE_TSC)
+ #define cpu_has_pge           boot_cpu_has(X86_FEATURE_PGE)
+ #define cpu_has_apic          boot_cpu_has(X86_FEATURE_APIC)
+-#define cpu_has_sep           boot_cpu_has(X86_FEATURE_SEP)
+-#define cpu_has_mtrr          boot_cpu_has(X86_FEATURE_MTRR)
+-#define cpu_has_mmx           boot_cpu_has(X86_FEATURE_MMX)
+ #define cpu_has_fxsr          boot_cpu_has(X86_FEATURE_FXSR)
+ #define cpu_has_xmm           boot_cpu_has(X86_FEATURE_XMM)
+ #define cpu_has_xmm2          boot_cpu_has(X86_FEATURE_XMM2)
+-#define cpu_has_xmm3          boot_cpu_has(X86_FEATURE_XMM3)
+-#define cpu_has_ssse3         boot_cpu_has(X86_FEATURE_SSSE3)
+ #define cpu_has_aes           boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+-#define cpu_has_ht            boot_cpu_has(X86_FEATURE_HT)
+-#define cpu_has_nx            boot_cpu_has(X86_FEATURE_NX)
+-#define cpu_has_xstore                boot_cpu_has(X86_FEATURE_XSTORE)
+-#define cpu_has_xstore_enabled        boot_cpu_has(X86_FEATURE_XSTORE_EN)
+-#define cpu_has_xcrypt                boot_cpu_has(X86_FEATURE_XCRYPT)
+-#define cpu_has_xcrypt_enabled        boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+-#define cpu_has_ace2          boot_cpu_has(X86_FEATURE_ACE2)
+-#define cpu_has_ace2_enabled  boot_cpu_has(X86_FEATURE_ACE2_EN)
+-#define cpu_has_phe           boot_cpu_has(X86_FEATURE_PHE)
+-#define cpu_has_phe_enabled   boot_cpu_has(X86_FEATURE_PHE_EN)
+-#define cpu_has_pmm           boot_cpu_has(X86_FEATURE_PMM)
+-#define cpu_has_pmm_enabled   boot_cpu_has(X86_FEATURE_PMM_EN)
+-#define cpu_has_ds            boot_cpu_has(X86_FEATURE_DS)
+-#define cpu_has_pebs          boot_cpu_has(X86_FEATURE_PEBS)
+ #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+-#define cpu_has_bts           boot_cpu_has(X86_FEATURE_BTS)
+ #define cpu_has_gbpages               boot_cpu_has(X86_FEATURE_GBPAGES)
+ #define cpu_has_arch_perfmon  boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+ #define cpu_has_pat           boot_cpu_has(X86_FEATURE_PAT)
+-#define cpu_has_xmm4_1                boot_cpu_has(X86_FEATURE_XMM4_1)
+-#define cpu_has_xmm4_2                boot_cpu_has(X86_FEATURE_XMM4_2)
+ #define cpu_has_x2apic                boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+-#define cpu_has_xsaveopt      boot_cpu_has(X86_FEATURE_XSAVEOPT)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_osxsave               boot_cpu_has(X86_FEATURE_OSXSAVE)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
+-#define cpu_has_pclmulqdq     boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+-#define cpu_has_perfctr_core  boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+-#define cpu_has_perfctr_nb    boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+-#define cpu_has_perfctr_l2    boot_cpu_has(X86_FEATURE_PERFCTR_L2)
+-#define cpu_has_cx8           boot_cpu_has(X86_FEATURE_CX8)
+-#define cpu_has_cx16          boot_cpu_has(X86_FEATURE_CX16)
+-#define cpu_has_eager_fpu     boot_cpu_has(X86_FEATURE_EAGER_FPU)
+-#define cpu_has_topoext               boot_cpu_has(X86_FEATURE_TOPOEXT)
+-#define cpu_has_bpext         boot_cpu_has(X86_FEATURE_BPEXT)
++/*
++ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
++ * fast paths and boot_cpu_has() otherwise!
++ */
+ #if __GNUC__ >= 4
+ extern void warn_pre_alternatives(void);
+--- a/arch/x86/include/asm/xor_32.h
++++ b/arch/x86/include/asm/xor_32.h
+@@ -553,7 +553,7 @@ do {                                                       \
+       if (cpu_has_xmm) {                              \
+               xor_speed(&xor_block_pIII_sse);         \
+               xor_speed(&xor_block_sse_pf64);         \
+-      } else if (cpu_has_mmx) {                       \
++      } else if (boot_cpu_has(X86_FEATURE_MMX)) {     \
+               xor_speed(&xor_block_pII_mmx);          \
+               xor_speed(&xor_block_p5_mmx);           \
+       } else {                                        \
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpui
+       int cpu = smp_processor_id();
+       /* get information required for multi-node processors */
+-      if (cpu_has_topoext) {
++      if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               u32 eax, ebx, ecx, edx;
+               cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+@@ -954,7 +954,7 @@ static bool cpu_has_amd_erratum(struct c
+ void set_dr_addr_mask(unsigned long mask, int dr)
+ {
+-      if (!cpu_has_bpext)
++      if (!boot_cpu_has(X86_FEATURE_BPEXT))
+               return;
+       switch (dr) {
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1539,7 +1539,9 @@ void cpu_init(void)
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+-      if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
++      if (cpu_feature_enabled(X86_FEATURE_VME) ||
++          cpu_has_tsc ||
++          boot_cpu_has(X86_FEATURE_DE))
+               cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+       load_current_idt();
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x8
+       if (cpu_has_xmm2)
+               set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+-      if (cpu_has_ds) {
++
++      if (boot_cpu_has(X86_FEATURE_DS)) {
+               unsigned int l1;
+               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+               if (!(l1 & (1<<11)))
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, stru
+       unsigned                edx;
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+-              if (cpu_has_topoext)
++              if (boot_cpu_has(X86_FEATURE_TOPOEXT))
+                       cpuid_count(0x8000001d, index, &eax.full,
+                                   &ebx.full, &ecx.full, &edx);
+               else
+@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct
+ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
+ {
+-      if (cpu_has_topoext) {
++      if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               num_cache_leaves = find_num_cache_leaves(c);
+       } else if (c->extended_cpuid_level >= 0x80000006) {
+               if (cpuid_edx(0x80000006) & 0xf000)
+@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsi
+       struct cacheinfo *this_leaf;
+       int i, sibling;
+-      if (cpu_has_topoext) {
++      if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               unsigned int apicid, nshared, first, last;
+               this_leaf = this_cpu_ci->info_list + index;
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *
+ void mtrr_save_fixed_ranges(void *info)
+ {
+-      if (cpu_has_mtrr)
++      if (boot_cpu_has(X86_FEATURE_MTRR))
+               get_fixed_ranges(mtrr_state.fixed_ranges);
+ }
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
+       phys_addr = 32;
+-      if (cpu_has_mtrr) {
++      if (boot_cpu_has(X86_FEATURE_MTRR)) {
+               mtrr_if = &generic_mtrr_ops;
+               size_or_mask = SIZE_OR_MASK_BITS(36);
+               size_and_mask = 0x00f00000;
+--- a/arch/x86/kernel/cpu/perf_event_amd.c
++++ b/arch/x86/kernel/cpu/perf_event_amd.c
+@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(in
+       if (offset)
+               return offset;
+-      if (!cpu_has_perfctr_core)
++      if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+               offset = index;
+       else
+               offset = index << 1;
+@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu
+ static int __init amd_core_pmu_init(void)
+ {
+-      if (!cpu_has_perfctr_core)
++      if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+               return 0;
+       switch (boot_cpu_data.x86) {
+--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               goto fail_nodev;
+-      if (!cpu_has_topoext)
++      if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
+               goto fail_nodev;
+-      if (cpu_has_perfctr_nb) {
++      if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+               amd_uncore_nb = alloc_percpu(struct amd_uncore *);
+               if (!amd_uncore_nb) {
+                       ret = -ENOMEM;
+@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
+               ret = 0;
+       }
+-      if (cpu_has_perfctr_l2) {
++      if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
+               amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
+               if (!amd_uncore_l2) {
+                       ret = -ENOMEM;
+@@ -583,10 +583,11 @@ fail_online:
+       /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
+       amd_uncore_nb = amd_uncore_l2 = NULL;
+-      if (cpu_has_perfctr_l2)
++
++      if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
+               perf_pmu_unregister(&amd_l2_pmu);
+ fail_l2:
+-      if (cpu_has_perfctr_nb)
++      if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
+               perf_pmu_unregister(&amd_nb_pmu);
+       if (amd_uncore_l2)
+               free_percpu(amd_uncore_l2);
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,7 +15,7 @@
+  */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+-      if (!cpu_has_eager_fpu)
++      if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+               stts();
+       else
+               clts();
+@@ -279,7 +279,7 @@ static void __init fpu__init_system_ctx_
+       current_thread_info()->status = 0;
+       /* Auto enable eagerfpu for xsaveopt */
+-      if (cpu_has_xsaveopt && eagerfpu != DISABLE)
++      if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+               eagerfpu = ENABLE;
+       if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct per
+                       return -EINVAL;
+               if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+                       return -EINVAL;
++
++              if (!boot_cpu_has(X86_FEATURE_BPEXT))
++                      return -EOPNOTSUPP;
++
+               /*
+                * It's impossible to use a range breakpoint to fake out
+                * user vs kernel detection because bp_len - 1 can't
+@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct per
+                * breakpoints, then we'll have to check for kprobe-blacklisted
+                * addresses anywhere in the range.
+                */
+-              if (!cpu_has_bpext)
+-                      return -EOPNOTSUPP;
+               info->mask = bp->attr.bp_len - 1;
+               info->len = X86_BREAKPOINT_LEN_1;
+       }
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -295,7 +295,7 @@ do {                                                                       \
+ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ {
+-      if (cpu_has_topoext) {
++      if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+               if (c->phys_proc_id == o->phys_proc_id &&
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_
+       tss = &per_cpu(cpu_tss, get_cpu());
+       /* make room for real-mode segments */
+       tsk->thread.sp0 += 16;
+-      if (cpu_has_sep)
++
++      if (static_cpu_has_safe(X86_FEATURE_SEP))
+               tsk->thread.sysenter_cs = 0;
++
+       load_sp0(tss, &tsk->thread);
+       put_cpu();
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
+ void x86_configure_nx(void)
+ {
+-      if (cpu_has_nx && !disable_nx)
++      if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+               __supported_pte_mask |= _PAGE_NX;
+       else
+               __supported_pte_mask &= ~_PAGE_NX;
+@@ -39,7 +39,7 @@ void x86_configure_nx(void)
+ void __init x86_report_nx(void)
+ {
+-      if (!cpu_has_nx) {
++      if (!boot_cpu_has(X86_FEATURE_NX)) {
+               printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
+                      "missing in CPU!\n");
+       } else {
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rn
+        * RNG configuration like it used to be the case in this
+        * register */
+       if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+-              if (!cpu_has_xstore_enabled) {
++              if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+                       pr_err(PFX "can't enable hardware RNG "
+                               "if XSTORE is not enabled\n");
+                       return -ENODEV;
+@@ -200,8 +200,9 @@ static int __init mod_init(void)
+ {
+       int err;
+-      if (!cpu_has_xstore)
++      if (!boot_cpu_has(X86_FEATURE_XSTORE))
+               return -ENODEV;
++
+       pr_info("VIA RNG detected\n");
+       err = hwrng_register(&via_rng);
+       if (err) {
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -515,7 +515,7 @@ static int __init padlock_init(void)
+       if (!x86_match_cpu(padlock_cpu_id))
+               return -ENODEV;
+-      if (!cpu_has_xcrypt_enabled) {
++      if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
+               printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+               return -ENODEV;
+       }
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -540,7 +540,7 @@ static int __init padlock_init(void)
+       struct shash_alg *sha1;
+       struct shash_alg *sha256;
+-      if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
++      if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
+               return -ENODEV;
+       /* Register the newly added algorithm module if on *
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(v
+                * should have X86_FEATURE_CX16 support, this has been confirmed
+                * with Intel hardware guys.
+                */
+-              if ( cpu_has_cx16 )
++              if (boot_cpu_has(X86_FEATURE_CX16))
+                       intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
+               for_each_iommu(iommu, drhd)
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -923,7 +923,7 @@ static int check_async_write(struct inod
+       if (bio_flags & EXTENT_BIO_TREE_LOG)
+               return 0;
+ #ifdef CONFIG_X86
+-      if (cpu_has_xmm4_2)
++      if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+               return 0;
+ #endif
+       return 1;
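
The replacement comment in cpufeature.h names static_cpu_has_safe() for fast paths; unlike plain static_cpu_has(), the _safe variant also works before alternatives have been applied. The btrfs hunk above is the pattern, in sketch form (want_hw_crc32c() is a hypothetical helper):

    #ifdef CONFIG_X86
    #include <asm/cpufeature.h>
    #endif

    static int want_hw_crc32c(void)
    {
    #ifdef CONFIG_X86
            /* fast-path check that is also safe pre-alternatives */
            if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
                    return 1;
    #endif
            return 0;
    }
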
diff --git a/queue-4.4/x86-cpufeature-replace-cpu_has_aes-with-boot_cpu_has-usage.patch b/queue-4.4/x86-cpufeature-replace-cpu_has_aes-with-boot_cpu_has-usage.patch
new file mode 100644 (file)
index 0000000..9fcf5a6
--- /dev/null
@@ -0,0 +1,64 @@
+From 1f4dd7938ea575a2d1972e180eaef31e6edb1808 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 4 Apr 2016 22:24:55 +0200
+Subject: x86/cpufeature: Replace cpu_has_aes with boot_cpu_has() usage
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 1f4dd7938ea575a2d1972e180eaef31e6edb1808 upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/1459801503-15600-3-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/camellia_aesni_avx2_glue.c |    3 ++-
+ arch/x86/crypto/camellia_aesni_avx_glue.c  |    4 +++-
+ arch/x86/include/asm/cpufeature.h          |    1 -
+ 3 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -562,7 +562,8 @@ static int __init camellia_aesni_init(vo
+ {
+       const char *feature_name;
+-      if (!boot_cpu_has(X86_FEATURE_AVX2) || !cpu_has_avx || !cpu_has_aes ||
++      if (!boot_cpu_has(X86_FEATURE_AVX2) || !cpu_has_avx ||
++          !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
+@@ -554,7 +554,9 @@ static int __init camellia_aesni_init(vo
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx || !cpu_has_aes || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
++      if (!cpu_has_avx ||
++          !boot_cpu_has(X86_FEATURE_AES) ||
++          !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -373,7 +373,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_apic          boot_cpu_has(X86_FEATURE_APIC)
+ #define cpu_has_fxsr          boot_cpu_has(X86_FEATURE_FXSR)
+ #define cpu_has_xmm           boot_cpu_has(X86_FEATURE_XMM)
+-#define cpu_has_aes           boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
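
Taken together with the OSXSAVE patch, the crypto-glue init gate now has a consistent shape: check the instruction-set bits, then confirm the kernel context-switches the required xstate. Sketch of the full gate (aesni_avx_demo_init() is hypothetical; cpu_has_xfeatures() is the real 4.4-era API reachable via <asm/fpu/api.h>):

    #include <linux/module.h>
    #include <asm/cpufeature.h>
    #include <asm/fpu/api.h>        /* cpu_has_xfeatures() */

    static int __init aesni_avx_demo_init(void)
    {
            const char *feature_name;

            /* instructions present on the boot CPU? */
            if (!boot_cpu_has(X86_FEATURE_AVX) ||
                !boot_cpu_has(X86_FEATURE_AES) ||
                !boot_cpu_has(X86_FEATURE_OSXSAVE))
                    return -ENODEV;

            /* ...and the kernel saves/restores the needed register state? */
            if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
                                   &feature_name)) {
                    pr_info("CPU feature '%s' is not supported.\n",
                            feature_name);
                    return -ENODEV;
            }
            return 0;
    }
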
diff --git a/queue-4.4/x86-cpufeature-replace-cpu_has_avx2-with-boot_cpu_has-usage.patch b/queue-4.4/x86-cpufeature-replace-cpu_has_avx2-with-boot_cpu_has-usage.patch
new file mode 100644 (file)
index 0000000..d35dc9b
--- /dev/null
@@ -0,0 +1,85 @@
+From abcfdfe07de75f830cbec1aa3eb17833a0166697 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 4 Apr 2016 22:24:54 +0200
+Subject: x86/cpufeature: Replace cpu_has_avx2 with boot_cpu_has() usage
+
+From: Borislav Petkov <bp@suse.de>
+
+commit abcfdfe07de75f830cbec1aa3eb17833a0166697 upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/1459801503-15600-2-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/camellia_aesni_avx2_glue.c |    2 +-
+ arch/x86/crypto/chacha20_glue.c            |    2 +-
+ arch/x86/crypto/poly1305_glue.c            |    2 +-
+ arch/x86/crypto/serpent_avx2_glue.c        |    2 +-
+ arch/x86/include/asm/cpufeature.h          |    1 -
+ 5 files changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -562,7 +562,7 @@ static int __init camellia_aesni_init(vo
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes ||
++      if (!boot_cpu_has(X86_FEATURE_AVX2) || !cpu_has_avx || !cpu_has_aes ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+--- a/arch/x86/crypto/chacha20_glue.c
++++ b/arch/x86/crypto/chacha20_glue.c
+@@ -129,7 +129,7 @@ static int __init chacha20_simd_mod_init
+               return -ENODEV;
+ #ifdef CONFIG_AS_AVX2
+-      chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
++      chacha20_use_avx2 = cpu_has_avx && boot_cpu_has(X86_FEATURE_AVX2) &&
+                           cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+ #endif
+       return crypto_register_alg(&alg);
+--- a/arch/x86/crypto/poly1305_glue.c
++++ b/arch/x86/crypto/poly1305_glue.c
+@@ -182,7 +182,7 @@ static int __init poly1305_simd_mod_init
+               return -ENODEV;
+ #ifdef CONFIG_AS_AVX2
+-      poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
++      poly1305_use_avx2 = cpu_has_avx && boot_cpu_has(X86_FEATURE_AVX2) &&
+                           cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+       alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
+       if (poly1305_use_avx2)
+--- a/arch/x86/crypto/serpent_avx2_glue.c
++++ b/arch/x86/crypto/serpent_avx2_glue.c
+@@ -538,7 +538,7 @@ static int __init init(void)
+ {
+       const char *feature_name;
+-      if (!cpu_has_avx2 || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
++      if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 instructions are not detected.\n");
+               return -ENODEV;
+       }
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -375,7 +375,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_xmm           boot_cpu_has(X86_FEATURE_XMM)
+ #define cpu_has_aes           boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx           boot_cpu_has(X86_FEATURE_AVX)
+-#define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_hypervisor    boot_cpu_has(X86_FEATURE_HYPERVISOR)
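The two cpufeature conversions above follow one pattern: drop the cpu_has_* wrapper macro and test the feature bitmap directly through boot_cpu_has(). A minimal standalone C sketch of the same idea, using a hypothetical userspace feature bitmap rather than the kernel's real capability arrays:

    #include <stdbool.h>
    #include <stdio.h>

    enum { FEATURE_AES, FEATURE_AVX, FEATURE_AVX2, FEATURE_OSXSAVE };

    static unsigned long feature_bits;  /* filled in once at "boot" */

    /* Userspace stand-in for boot_cpu_has(): test one bit in the bitmap. */
    static bool cpu_feature_set(int bit)
    {
            return feature_bits & (1UL << bit);
    }

    int main(void)
    {
            feature_bits = (1UL << FEATURE_AVX) | (1UL << FEATURE_OSXSAVE);

            /* Same shape as the converted module-init checks above: */
            if (!cpu_feature_set(FEATURE_AVX2) || !cpu_feature_set(FEATURE_OSXSAVE))
                    puts("AVX2 or OSXSAVE not detected, would return -ENODEV");
            return 0;
    }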
diff --git a/queue-4.4/x86-fpu-default-eagerfpu-on-on-all-cpus.patch b/queue-4.4/x86-fpu-default-eagerfpu-on-on-all-cpus.patch
new file mode 100644 (file)
index 0000000..b2cfe46
--- /dev/null
@@ -0,0 +1,89 @@
+From 58122bf1d856a4ea9581d62a07c557d997d46a19 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:10 -0800
+Subject: x86/fpu: Default eagerfpu=on on all CPUs
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 58122bf1d856a4ea9581d62a07c557d997d46a19 upstream.
+
+We have eager and lazy FPU modes, introduced in:
+
+  304bceda6a18 ("x86, fpu: use non-lazy fpu restore for processors supporting xsave")
+
+The result is rather messy.  There are two code paths in almost all
+of the FPU code, and only one of them (the eager case) is tested
+frequently, since most kernel developers have new enough hardware
+that we use eagerfpu.
+
+It seems that, on any remotely recent hardware, eagerfpu is a win:
+glibc uses SSE2, so laziness is probably overoptimistic, and, in any
+case, manipulating TS is far slower than saving and restoring the
+full state.  (Stores to CR0.TS are serializing and are poorly
+optimized.)
+
+To try to shake out any latent issues on old hardware, this changes
+the default to eager on all CPUs.  If no performance or functionality
+problems show up, a subsequent patch could remove lazy mode entirely.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/ac290de61bf08d9cfc2664a4f5080257ffc1075a.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/init.c |   13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -252,7 +252,10 @@ static void __init fpu__init_system_xsta
+  * not only saved the restores along the way, but we also have the
+  * FPU ready to be used for the original task.
+  *
+- * 'eager' switching is used on modern CPUs, there we switch the FPU
++ * 'lazy' is deprecated because it's almost never a performance win
++ * and it's much more complicated than 'eager'.
++ *
++ * 'eager' switching is by default on all CPUs, there we switch the FPU
+  * state during every context switch, regardless of whether the task
+  * has used FPU instructions in that time slice or not. This is done
+  * because modern FPU context saving instructions are able to optimize
+@@ -263,7 +266,7 @@ static void __init fpu__init_system_xsta
+  *   to use 'eager' restores, if we detect that a task is using the FPU
+  *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+  */
+-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
++static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+ /*
+  * Find supported xfeatures based on cpu features and command-line input.
+@@ -340,15 +343,9 @@ static void __init fpu__init_system_ctx_
+  */
+ static void __init fpu__init_parse_early_param(void)
+ {
+-      /*
+-       * No need to check "eagerfpu=auto" again, since it is the
+-       * initial default.
+-       */
+       if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+               eagerfpu = DISABLE;
+               fpu__clear_eager_fpu_features();
+-      } else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
+-              eagerfpu = ENABLE;
+       }
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
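Before this patch, eagerfpu was auto-enabled only when the CPU advertised XSAVEOPT. A hedged userspace sketch of that probe via the compiler's <cpuid.h> helper; it assumes CPUID.(EAX=0xD, ECX=1):EAX bit 0 reports XSAVEOPT, per the SDM:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

            /* CPUID leaf 0xD, sub-leaf 1: EAX bit 0 advertises XSAVEOPT. */
            if (!__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0xd not supported");
                    return 1;
            }
            printf("XSAVEOPT %s\n", (eax & 1) ? "present" : "absent");
            return 0;
    }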
diff --git a/queue-4.4/x86-fpu-disable-avx-when-eagerfpu-is-off.patch b/queue-4.4/x86-fpu-disable-avx-when-eagerfpu-is-off.patch
new file mode 100644 (file)
index 0000000..a879d15
--- /dev/null
@@ -0,0 +1,100 @@
+From 394db20ca240741a08d472173db13d6f6a6e5a28 Mon Sep 17 00:00:00 2001
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:54 -0800
+Subject: x86/fpu: Disable AVX when eagerfpu is off
+
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+
+commit 394db20ca240741a08d472173db13d6f6a6e5a28 upstream.
+
+When "eagerfpu=off" is given as a command-line input, the kernel
+should disable AVX support.
+
+The Task Switched bit used for lazy context switching does not
+support AVX. If AVX is enabled without eagerfpu context
+switching, one task's AVX state could become corrupted or leak
+to other tasks. This is a bug and has bad security implications.
+
+This only affects systems that have AVX/AVX2/AVX512 and this
+issue will be found only when one actually uses AVX/AVX2/AVX512
+_AND_ does eagerfpu=off.
+
+Reference: Intel Software Developer's Manual Vol. 3A
+
+Sec. 2.5 Control Registers:
+TS Task Switched bit (bit 3 of CR0) -- Allows the saving of the
+x87 FPU/ MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context on a task switch
+to be delayed until an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4
+instruction is actually executed by the new task.
+
+Sec. 13.4.1 Using the TS Flag to Control the Saving of the X87
+FPU and SSE State
+When the TS flag is set, the processor monitors the instruction
+stream for x87 FPU, MMX, SSE instructions. When the processor
+detects one of these instructions, it raises a
+device-not-available exception (#NM) prior to executing the
+instruction.
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-5-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu/xstate.h |   11 ++++++-----
+ arch/x86/kernel/fpu/init.c        |    6 ++++++
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/xstate.h
++++ b/arch/x86/include/asm/fpu/xstate.h
+@@ -20,15 +20,16 @@
+ /* Supported features which support lazy state saving */
+ #define XFEATURE_MASK_LAZY    (XFEATURE_MASK_FP | \
+-                               XFEATURE_MASK_SSE | \
++                               XFEATURE_MASK_SSE)
++
++/* Supported features which require eager state saving */
++#define XFEATURE_MASK_EAGER   (XFEATURE_MASK_BNDREGS | \
++                               XFEATURE_MASK_BNDCSR | \
+                                XFEATURE_MASK_YMM | \
+-                               XFEATURE_MASK_OPMASK | \
++                               XFEATURE_MASK_OPMASK | \
+                                XFEATURE_MASK_ZMM_Hi256 | \
+                                XFEATURE_MASK_Hi16_ZMM)
+-/* Supported features which require eager state saving */
+-#define XFEATURE_MASK_EAGER   (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+-
+ /* All currently supported features */
+ #define XCNTXT_MASK   (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -292,6 +292,12 @@ u64 __init fpu__get_supported_xfeatures_
+ static void __init fpu__clear_eager_fpu_features(void)
+ {
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
++      setup_clear_cpu_cap(X86_FEATURE_AVX);
++      setup_clear_cpu_cap(X86_FEATURE_AVX2);
++      setup_clear_cpu_cap(X86_FEATURE_AVX512F);
++      setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
++      setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
++      setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ }
+ /*
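The xstate.h hunk above moves the YMM and AVX-512 components from the lazy set into the eager set. A standalone sketch of the resulting mask arithmetic, with bit positions taken from the XSAVE state-component numbering (the macros here are illustrative re-definitions, not the kernel headers):

    #include <stdint.h>
    #include <stdio.h>

    /* XSAVE state-component bit positions (illustrative re-definitions): */
    #define XFEATURE_MASK_FP        (1ULL << 0)
    #define XFEATURE_MASK_SSE       (1ULL << 1)
    #define XFEATURE_MASK_YMM       (1ULL << 2)
    #define XFEATURE_MASK_BNDREGS   (1ULL << 3)
    #define XFEATURE_MASK_BNDCSR    (1ULL << 4)
    #define XFEATURE_MASK_OPMASK    (1ULL << 5)
    #define XFEATURE_MASK_ZMM_Hi256 (1ULL << 6)
    #define XFEATURE_MASK_Hi16_ZMM  (1ULL << 7)

    /* After the patch: only FP and SSE may be switched lazily. */
    #define XFEATURE_MASK_LAZY  (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
    #define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR | \
                                 XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | \
                                 XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)

    int main(void)
    {
            uint64_t xfeatures_mask = 0xff; /* pretend all eight are enumerated */

            /* With eagerfpu=off, everything in the eager set gets masked out: */
            printf("lazy-only mask: 0x%llx\n",
                   (unsigned long long)(xfeatures_mask & ~XFEATURE_MASK_EAGER));
            return 0;
    }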
diff --git a/queue-4.4/x86-fpu-disable-mpx-when-eagerfpu-is-off.patch b/queue-4.4/x86-fpu-disable-mpx-when-eagerfpu-is-off.patch
new file mode 100644 (file)
index 0000000..c8dbdb6
--- /dev/null
@@ -0,0 +1,149 @@
+From a5fe93a549c54838063d2952dd9643b0b18aa67f Mon Sep 17 00:00:00 2001
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:53 -0800
+Subject: x86/fpu: Disable MPX when eagerfpu is off
+
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+
+commit a5fe93a549c54838063d2952dd9643b0b18aa67f upstream.
+
+This issue is a fallout from the command-line parsing move.
+
+When "eagerfpu=off" is given as a command-line input, the kernel
+should disable MPX support. The decision for turning off MPX was
+made in fpu__init_system_ctx_switch(), which is after the
+selection of the XSAVE format. This patch fixes it by getting
+that decision done earlier in fpu__init_system_xstate().
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-4-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu/internal.h |    1 
+ arch/x86/kernel/fpu/init.c          |   56 ++++++++++++++++++++++++++++--------
+ arch/x86/kernel/fpu/xstate.c        |    3 -
+ 3 files changed, 46 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
+ extern void fpu__init_system(struct cpuinfo_x86 *c);
+ extern void fpu__init_check_bugs(void);
+ extern void fpu__resume_cpu(void);
++extern u64 fpu__get_supported_xfeatures_mask(void);
+ /*
+  * Debugging facility:
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -266,7 +266,45 @@ static void __init fpu__init_system_xsta
+ static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+ /*
++ * Find supported xfeatures based on cpu features and command-line input.
++ * This must be called after fpu__init_parse_early_param() is called and
++ * xfeatures_mask is enumerated.
++ */
++u64 __init fpu__get_supported_xfeatures_mask(void)
++{
++      /* Support all xfeatures known to us */
++      if (eagerfpu != DISABLE)
++              return XCNTXT_MASK;
++
++      /* Warning of xfeatures being disabled for no eagerfpu mode */
++      if (xfeatures_mask & XFEATURE_MASK_EAGER) {
++              pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
++                      xfeatures_mask & XFEATURE_MASK_EAGER);
++      }
++
++      /* Return a mask that masks out all features requiring eagerfpu mode */
++      return ~XFEATURE_MASK_EAGER;
++}
++
++/*
++ * Disable features dependent on eagerfpu.
++ */
++static void __init fpu__clear_eager_fpu_features(void)
++{
++      setup_clear_cpu_cap(X86_FEATURE_MPX);
++}
++
++/*
+  * Pick the FPU context switching strategy:
++ *
++ * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
++ * the following is true:
++ *
++ * (1) the cpu has xsaveopt, as it has the optimization and doing eager
++ *     FPU switching has a relatively low cost compared to a plain xsave;
++ * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
++ *     switching. Should the kernel boot with noxsaveopt, we support MPX
++ *     with eager FPU switching at a higher cost.
+  */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+@@ -278,19 +316,11 @@ static void __init fpu__init_system_ctx_
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+       current_thread_info()->status = 0;
+-      /* Auto enable eagerfpu for xsaveopt */
+       if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+               eagerfpu = ENABLE;
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+-              if (eagerfpu == DISABLE) {
+-                      pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+-                             xfeatures_mask & XFEATURE_MASK_EAGER);
+-                      xfeatures_mask &= ~XFEATURE_MASK_EAGER;
+-              } else {
+-                      eagerfpu = ENABLE;
+-              }
+-      }
++      if (xfeatures_mask & XFEATURE_MASK_EAGER)
++              eagerfpu = ENABLE;
+       if (eagerfpu == ENABLE)
+               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+@@ -308,10 +338,12 @@ static void __init fpu__init_parse_early
+        * No need to check "eagerfpu=auto" again, since it is the
+        * initial default.
+        */
+-      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off"))
++      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+               eagerfpu = DISABLE;
+-      else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on"))
++              fpu__clear_eager_fpu_features();
++      } else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
+               eagerfpu = ENABLE;
++      }
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
+               setup_clear_cpu_cap(X86_FEATURE_FPU);
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -632,8 +632,7 @@ void __init fpu__init_system_xstate(void
+               BUG();
+       }
+-      /* Support only the state known to the OS: */
+-      xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
++      xfeatures_mask &= fpu__get_supported_xfeatures_mask();
+       /* Enable xstate instructions to be able to continue with initialization: */
+       fpu__init_cpu_xstate();
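The fix hinges on ordering: the supported-xfeatures mask is now computed from the already-parsed eagerfpu setting before the XSAVE format is chosen. A hypothetical standalone rendering of that decision function (names and values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum eager_mode { AUTO, ENABLE, DISABLE };

    /* Hypothetical analogue of fpu__get_supported_xfeatures_mask(): the
     * eagerfpu decision feeds the mask before any XSAVE format is picked. */
    static uint64_t supported_xfeatures(enum eager_mode eagerfpu,
                                        uint64_t all_known, uint64_t eager_only)
    {
            if (eagerfpu != DISABLE)
                    return all_known;               /* every feature we know about */
            return all_known & ~eager_only;         /* drop eager-only features */
    }

    int main(void)
    {
            uint64_t all = 0xff, eager_only = 0x18; /* BNDREGS|BNDCSR in this toy */

            printf("eagerfpu=off -> 0x%llx\n",
                   (unsigned long long)supported_xfeatures(DISABLE, all, eager_only));
            return 0;
    }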
diff --git a/queue-4.4/x86-fpu-fix-eager-fpu-handling-on-legacy-fpu-machines.patch b/queue-4.4/x86-fpu-fix-eager-fpu-handling-on-legacy-fpu-machines.patch
new file mode 100644 (file)
index 0000000..48aa13a
--- /dev/null
@@ -0,0 +1,82 @@
+From 6e6867093de35141f0a76b66ac13f9f2e2c8e77a Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@alien8.de>
+Date: Fri, 11 Mar 2016 12:32:06 +0100
+Subject: x86/fpu: Fix eager-FPU handling on legacy FPU machines
+
+From: Borislav Petkov <bp@alien8.de>
+
+commit 6e6867093de35141f0a76b66ac13f9f2e2c8e77a upstream.
+
+i486 derived cores like Intel Quark support only the very old,
+legacy x87 FPU (FSAVE/FRSTOR, CPUID bit FXSR is not set), and
+our FPU code wasn't handling the saving and restoring there
+properly in the 'eagerfpu' case.
+
+So after we made eagerfpu the default for all CPU types:
+
+  58122bf1d856 x86/fpu: Default eagerfpu=on on all CPUs
+
+these old FPU designs broke. First, Andy Shevchenko reported a splat:
+
+  WARNING: CPU: 0 PID: 823 at arch/x86/include/asm/fpu/internal.h:163 fpu__clear+0x8c/0x160
+
+which was us trying to execute FXRSTOR on those machines even though
+they don't support it.
+
+After taking care of that, Bryan O'Donoghue reported that a simple FPU
+test still failed because we weren't initializing the FPU state properly
+on those machines.
+
+Take care of all that.
+
+Reported-and-tested-by: Bryan O'Donoghue <pure.logic@nexus-software.ie>
+Reported-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Yu-cheng <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/20160311113206.GD4312@pd.tnic
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/core.c |    4 +++-
+ arch/x86/kernel/fpu/init.c |    2 +-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -409,8 +409,10 @@ static inline void copy_init_fpstate_to_
+ {
+       if (use_xsave())
+               copy_kernel_to_xregs(&init_fpstate.xsave, -1);
+-      else
++      else if (static_cpu_has(X86_FEATURE_FXSR))
+               copy_kernel_to_fxregs(&init_fpstate.fxsave);
++      else
++              copy_kernel_to_fregs(&init_fpstate.fsave);
+ }
+ /*
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -135,7 +135,7 @@ static void __init fpu__init_system_gene
+        * Set up the legacy init FPU context. (xstate init might overwrite this
+        * with a more modern format, if the CPU supports it.)
+        */
+-      fpstate_init_fxstate(&init_fpstate.fxsave);
++      fpstate_init(&init_fpstate);
+       fpu__init_system_mxcsr();
+ }
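The core.c hunk turns a two-way restore into a three-way dispatch so that FSAVE-only parts get the legacy path. A toy standalone model of that dispatch, printing instead of executing the real restore instructions:

    #include <stdbool.h>
    #include <stdio.h>

    static bool use_xsave_fmt;  /* XSAVE-format init state available */
    static bool has_fxsr;       /* CPUID FXSR bit set */

    static void copy_init_fpstate_to_fpregs(void)
    {
            if (use_xsave_fmt)
                    puts("XRSTOR from init_fpstate.xsave");
            else if (has_fxsr)
                    puts("FXRSTOR from init_fpstate.fxsave");
            else
                    puts("FRSTOR from init_fpstate.fsave"); /* the leg the fix adds */
    }

    int main(void)
    {
            /* Intel Quark-like configuration: no XSAVE, no FXSR */
            use_xsave_fmt = false;
            has_fxsr = false;
            copy_init_fpstate_to_fpregs();
            return 0;
    }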
diff --git a/queue-4.4/x86-fpu-fix-early-fpu-command-line-parsing.patch b/queue-4.4/x86-fpu-fix-early-fpu-command-line-parsing.patch
new file mode 100644 (file)
index 0000000..327c41d
--- /dev/null
@@ -0,0 +1,190 @@
+From 4f81cbafcce2c603db7865e9d0e461f7947d77d4 Mon Sep 17 00:00:00 2001
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:51 -0800
+Subject: x86/fpu: Fix early FPU command-line parsing
+
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+
+commit 4f81cbafcce2c603db7865e9d0e461f7947d77d4 upstream.
+
+The function fpu__init_system() is executed before
+parse_early_param(). This results in an incorrect FPU configuration.
+This patch fixes the issue by parsing boot_command_line at the
+beginning of fpu__init_system().
+
+With all four patches in this series, each parameter disables
+features as the following:
+
+eagerfpu=off: eagerfpu, avx, avx2, avx512, mpx
+no387: fpu
+nofxsr: fxsr, fxsropt, xmm
+noxsave: xsave, xsaveopt, xsaves, xsavec, avx, avx2, avx512, mpx, xgetbv1
+noxsaveopt: xsaveopt
+noxsaves: xsaves
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-2-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/init.c |  109 +++++++++++++++------------------------------
+ 1 file changed, 38 insertions(+), 71 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -3,8 +3,11 @@
+  */
+ #include <asm/fpu/internal.h>
+ #include <asm/tlbflush.h>
++#include <asm/setup.h>
++#include <asm/cmdline.h>
+ #include <linux/sched.h>
++#include <linux/init.h>
+ /*
+  * Initialize the TS bit in CR0 according to the style of context-switches
+@@ -262,18 +265,6 @@ static void __init fpu__init_system_xsta
+  */
+ static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+-static int __init eager_fpu_setup(char *s)
+-{
+-      if (!strcmp(s, "on"))
+-              eagerfpu = ENABLE;
+-      else if (!strcmp(s, "off"))
+-              eagerfpu = DISABLE;
+-      else if (!strcmp(s, "auto"))
+-              eagerfpu = AUTO;
+-      return 1;
+-}
+-__setup("eagerfpu=", eager_fpu_setup);
+-
+ /*
+  * Pick the FPU context switching strategy:
+  */
+@@ -308,11 +299,46 @@ static void __init fpu__init_system_ctx_
+ }
+ /*
++ * We parse fpu parameters early because fpu__init_system() is executed
++ * before parse_early_param().
++ */
++static void __init fpu__init_parse_early_param(void)
++{
++      /*
++       * No need to check "eagerfpu=auto" again, since it is the
++       * initial default.
++       */
++      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off"))
++              eagerfpu = DISABLE;
++      else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on"))
++              eagerfpu = ENABLE;
++
++      if (cmdline_find_option_bool(boot_command_line, "no387"))
++              setup_clear_cpu_cap(X86_FEATURE_FPU);
++
++      if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
++              setup_clear_cpu_cap(X86_FEATURE_FXSR);
++              setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
++              setup_clear_cpu_cap(X86_FEATURE_XMM);
++      }
++
++      if (cmdline_find_option_bool(boot_command_line, "noxsave"))
++              fpu__xstate_clear_all_cpu_caps();
++
++      if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
++              setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
++
++      if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
++              setup_clear_cpu_cap(X86_FEATURE_XSAVES);
++}
++
++/*
+  * Called on the boot CPU once per system bootup, to set up the initial
+  * FPU state that is later cloned into all processes:
+  */
+ void __init fpu__init_system(struct cpuinfo_x86 *c)
+ {
++      fpu__init_parse_early_param();
+       fpu__init_system_early_generic(c);
+       /*
+@@ -336,62 +362,3 @@ void __init fpu__init_system(struct cpui
+       fpu__init_system_ctx_switch();
+ }
+-
+-/*
+- * Boot parameter to turn off FPU support and fall back to math-emu:
+- */
+-static int __init no_387(char *s)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_FPU);
+-      return 1;
+-}
+-__setup("no387", no_387);
+-
+-/*
+- * Disable all xstate CPU features:
+- */
+-static int __init x86_noxsave_setup(char *s)
+-{
+-      if (strlen(s))
+-              return 0;
+-
+-      fpu__xstate_clear_all_cpu_caps();
+-
+-      return 1;
+-}
+-__setup("noxsave", x86_noxsave_setup);
+-
+-/*
+- * Disable the XSAVEOPT instruction specifically:
+- */
+-static int __init x86_noxsaveopt_setup(char *s)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+-
+-      return 1;
+-}
+-__setup("noxsaveopt", x86_noxsaveopt_setup);
+-
+-/*
+- * Disable the XSAVES instruction:
+- */
+-static int __init x86_noxsaves_setup(char *s)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+-
+-      return 1;
+-}
+-__setup("noxsaves", x86_noxsaves_setup);
+-
+-/*
+- * Disable FX save/restore and SSE support:
+- */
+-static int __init x86_nofxsr_setup(char *s)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_FXSR);
+-      setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
+-      setup_clear_cpu_cap(X86_FEATURE_XMM);
+-
+-      return 1;
+-}
+-__setup("nofxsr", x86_nofxsr_setup);
diff --git a/queue-4.4/x86-fpu-fix-no387-regression.patch b/queue-4.4/x86-fpu-fix-no387-regression.patch
new file mode 100644 (file)
index 0000000..1d4bee6
--- /dev/null
@@ -0,0 +1,65 @@
+From f363938c70a04e6bc99023a5e0c44ef7879b903f Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 21 Jan 2016 15:24:31 -0800
+Subject: x86/fpu: Fix 'no387' regression
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit f363938c70a04e6bc99023a5e0c44ef7879b903f upstream.
+
+After fixing FPU option parsing, we now parse the 'no387' boot option
+too early: no387 clears X86_FEATURE_FPU before it's even probed, so
+the boot CPU promptly re-enables it.
+
+I suspect it gets even more confused on SMP.
+
+Fix the probing code to leave X86_FEATURE_FPU off if it's been
+disabled by setup_clear_cpu_cap().
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Fixes: 4f81cbafcce2 ("x86/fpu: Fix early FPU command-line parsing")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/init.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -78,13 +78,15 @@ static void fpu__init_system_early_gener
+       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+       write_cr0(cr0);
+-      asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+-                   : "+m" (fsw), "+m" (fcw));
++      if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
++              asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
++                           : "+m" (fsw), "+m" (fcw));
+-      if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+-              set_cpu_cap(c, X86_FEATURE_FPU);
+-      else
+-              clear_cpu_cap(c, X86_FEATURE_FPU);
++              if (fsw == 0 && (fcw & 0x103f) == 0x003f)
++                      set_cpu_cap(c, X86_FEATURE_FPU);
++              else
++                      clear_cpu_cap(c, X86_FEATURE_FPU);
++      }
+ #ifndef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu) {
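The probe being guarded here is the classic x87 presence test: after FNINIT, a real FPU reports a zero status word and the masked control-word bits read 0x003f. A userspace sketch of the same sequence; it only makes sense on x86 and assumes GCC-style inline assembly:

    #include <stdio.h>

    int main(void)
    {
            unsigned short fsw = 0xffff, fcw = 0xffff;

            /* FNINIT, then read back the status and control words. */
            __asm__ volatile("fninit ; fnstsw %0 ; fnstcw %1"
                             : "+m" (fsw), "+m" (fcw));

            if (fsw == 0 && (fcw & 0x103f) == 0x003f)
                    puts("x87 FPU present");
            else
                    puts("no x87 FPU");
            return 0;
    }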
diff --git a/queue-4.4/x86-fpu-hard-disable-lazy-fpu-mode.patch b/queue-4.4/x86-fpu-hard-disable-lazy-fpu-mode.patch
new file mode 100644 (file)
index 0000000..cd296c5
--- /dev/null
@@ -0,0 +1,196 @@
+From ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 4 Oct 2016 20:34:31 -0400
+Subject: x86/fpu: Hard-disable lazy FPU mode
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7 upstream.
+
+Since commit:
+
+  58122bf1d856 ("x86/fpu: Default eagerfpu=on on all CPUs")
+
+... in Linux 4.6, eager FPU mode has been the default on all x86
+systems, and no one has reported any regressions.
+
+This patch removes the ability to enable lazy mode: use_eager_fpu()
+becomes "return true" and all of the FPU mode selection machinery is
+removed.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: pbonzini@redhat.com
+Link: http://lkml.kernel.org/r/1475627678-20788-3-git-send-email-riel@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h   |    2 
+ arch/x86/include/asm/fpu/internal.h |    2 
+ arch/x86/kernel/fpu/init.c          |   91 +-----------------------------------
+ 3 files changed, 5 insertions(+), 90 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID       ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF        ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
++/* free, was #define X86_FEATURE_EAGER_FPU    ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -58,7 +58,7 @@ extern u64 fpu__get_supported_xfeatures_
+  */
+ static __always_inline __pure bool use_eager_fpu(void)
+ {
+-      return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
++      return true;
+ }
+ static __always_inline __pure bool use_xsaveopt(void)
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,10 +15,7 @@
+  */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+-      if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+-              stts();
+-      else
+-              clts();
++      clts();
+ }
+ /*
+@@ -235,82 +232,16 @@ static void __init fpu__init_system_xsta
+ }
+ /*
+- * FPU context switching strategies:
+- *
+- * Against popular belief, we don't do lazy FPU saves, due to the
+- * task migration complications it brings on SMP - we only do
+- * lazy FPU restores.
+- *
+- * 'lazy' is the traditional strategy, which is based on setting
+- * CR0::TS to 1 during context-switch (instead of doing a full
+- * restore of the FPU state), which causes the first FPU instruction
+- * after the context switch (whenever it is executed) to fault - at
+- * which point we lazily restore the FPU state into FPU registers.
+- *
+- * Tasks are of course under no obligation to execute FPU instructions,
+- * so it can easily happen that another context-switch occurs without
+- * a single FPU instruction being executed. If we eventually switch
+- * back to the original task (that still owns the FPU) then we have
+- * not only saved the restores along the way, but we also have the
+- * FPU ready to be used for the original task.
+- *
+- * 'lazy' is deprecated because it's almost never a performance win
+- * and it's much more complicated than 'eager'.
+- *
+- * 'eager' switching is by default on all CPUs, there we switch the FPU
+- * state during every context switch, regardless of whether the task
+- * has used FPU instructions in that time slice or not. This is done
+- * because modern FPU context saving instructions are able to optimize
+- * state saving and restoration in hardware: they can detect both
+- * unused and untouched FPU state and optimize accordingly.
+- *
+- * [ Note that even in 'lazy' mode we might optimize context switches
+- *   to use 'eager' restores, if we detect that a task is using the FPU
+- *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+- */
+-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+-
+-/*
+  * Find supported xfeatures based on cpu features and command-line input.
+  * This must be called after fpu__init_parse_early_param() is called and
+  * xfeatures_mask is enumerated.
+  */
+ u64 __init fpu__get_supported_xfeatures_mask(void)
+ {
+-      /* Support all xfeatures known to us */
+-      if (eagerfpu != DISABLE)
+-              return XCNTXT_MASK;
+-
+-      /* Warning of xfeatures being disabled for no eagerfpu mode */
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+-              pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+-                      xfeatures_mask & XFEATURE_MASK_EAGER);
+-      }
+-
+-      /* Return a mask that masks out all features requiring eagerfpu mode */
+-      return ~XFEATURE_MASK_EAGER;
++      return XCNTXT_MASK;
+ }
+-/*
+- * Disable features dependent on eagerfpu.
+- */
+-static void __init fpu__clear_eager_fpu_features(void)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_MPX);
+-}
+-
+-/*
+- * Pick the FPU context switching strategy:
+- *
+- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
+- * the following is true:
+- *
+- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
+- *     FPU switching has a relatively low cost compared to a plain xsave;
+- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
+- *     switching. Should the kernel boot with noxsaveopt, we support MPX
+- *     with eager FPU switching at a higher cost.
+- */
++/* Legacy code to initialize eager fpu mode. */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+       static bool on_boot_cpu = 1;
+@@ -320,17 +251,6 @@ static void __init fpu__init_system_ctx_
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+       current_thread_info()->status = 0;
+-
+-      if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+-              eagerfpu = ENABLE;
+-
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER)
+-              eagerfpu = ENABLE;
+-
+-      if (eagerfpu == ENABLE)
+-              setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+-
+-      printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ }
+ /*
+@@ -339,11 +259,6 @@ static void __init fpu__init_system_ctx_
+  */
+ static void __init fpu__init_parse_early_param(void)
+ {
+-      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+-              eagerfpu = DISABLE;
+-              fpu__clear_eager_fpu_features();
+-      }
+-
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
+               setup_clear_cpu_cap(X86_FEATURE_FPU);
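With the mode-selection machinery gone, use_eager_fpu() collapses to a compile-time constant, letting the compiler discard every lazy-mode branch. A trivial standalone illustration of that pattern:

    #include <stdio.h>

    /* After the patch the predicate is a constant: */
    static inline int use_eager_fpu(void)
    {
            return 1;
    }

    static void fpu_context_switch(void)
    {
            if (use_eager_fpu())
                    puts("restore FPU state eagerly");
            else
                    puts("set CR0.TS, fault on first FPU use"); /* now dead code */
    }

    int main(void)
    {
            fpu_context_switch();
            return 0;
    }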
diff --git a/queue-4.4/x86-fpu-revert-x86-fpu-disable-avx-when-eagerfpu-is-off.patch b/queue-4.4/x86-fpu-revert-x86-fpu-disable-avx-when-eagerfpu-is-off.patch
new file mode 100644 (file)
index 0000000..96e3ed4
--- /dev/null
@@ -0,0 +1,93 @@
+From a65050c6f17e52442716138d48d0a47301a8344b Mon Sep 17 00:00:00 2001
+From: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Date: Wed, 9 Mar 2016 16:28:54 -0800
+Subject: x86/fpu: Revert ("x86/fpu: Disable AVX when eagerfpu is off")
+
+From: Yu-cheng Yu <yu-cheng.yu@intel.com>
+
+commit a65050c6f17e52442716138d48d0a47301a8344b upstream.
+
+Leonid Shatz noticed that the SDM interpretation of the following
+recent commit:
+
+  394db20ca240741 ("x86/fpu: Disable AVX when eagerfpu is off")
+
+... is incorrect and that the original behavior of the FPU code was correct.
+
+Because AVX is not mentioned in the CR0.TS bit description, it was
+mistakenly believed not to be supported for lazy context switching.
+This turns out to be false:
+
+  Intel Software Developer's Manual Vol. 3A, Sec. 2.5 Control Registers:
+
+   'TS Task Switched bit (bit 3 of CR0) -- Allows the saving of the x87 FPU/
+    MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context on a task switch to be delayed until
+    an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instruction is actually executed
+    by the new task.'
+
+  Intel Software Developer's Manual Vol. 2A, Sec. 2.4 Instruction Exception
+  Specification:
+
+   'AVX instructions refer to exceptions by classes that include #NM
+    "Device Not Available" exception for lazy context switch.'
+
+So revert the commit.
+
+Reported-by: Leonid Shatz <leonid.shatz@ravellosystems.com>
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1457569734-3785-1-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu/xstate.h |    9 ++++-----
+ arch/x86/kernel/fpu/init.c        |    6 ------
+ 2 files changed, 4 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/xstate.h
++++ b/arch/x86/include/asm/fpu/xstate.h
+@@ -20,16 +20,15 @@
+ /* Supported features which support lazy state saving */
+ #define XFEATURE_MASK_LAZY    (XFEATURE_MASK_FP | \
+-                               XFEATURE_MASK_SSE)
+-
+-/* Supported features which require eager state saving */
+-#define XFEATURE_MASK_EAGER   (XFEATURE_MASK_BNDREGS | \
+-                               XFEATURE_MASK_BNDCSR | \
++                               XFEATURE_MASK_SSE | \
+                                XFEATURE_MASK_YMM | \
+                                XFEATURE_MASK_OPMASK | \
+                                XFEATURE_MASK_ZMM_Hi256 | \
+                                XFEATURE_MASK_Hi16_ZMM)
++/* Supported features which require eager state saving */
++#define XFEATURE_MASK_EAGER   (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
++
+ /* All currently supported features */
+ #define XCNTXT_MASK   (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -297,12 +297,6 @@ u64 __init fpu__get_supported_xfeatures_
+ static void __init fpu__clear_eager_fpu_features(void)
+ {
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX2);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+-      setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ }
+ /*
diff --git a/queue-4.4/x86-mm-pat-x86-cpufeature-remove-cpu_has_pat.patch b/queue-4.4/x86-mm-pat-x86-cpufeature-remove-cpu_has_pat.patch
new file mode 100644 (file)
index 0000000..d0e7fda
--- /dev/null
@@ -0,0 +1,45 @@
+From 568a58e5dfbcb88011cad7f87ed046aa00f19d1a Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 29 Mar 2016 17:42:01 +0200
+Subject: x86/mm/pat, x86/cpufeature: Remove cpu_has_pat
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 568a58e5dfbcb88011cad7f87ed046aa00f19d1a upstream.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: intel-gfx@lists.freedesktop.org
+Link: http://lkml.kernel.org/r/1459266123-21878-9-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeature.h |    1 -
+ drivers/gpu/drm/i915/i915_gem.c   |    2 +-
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -380,7 +380,6 @@ extern const char * const x86_bug_flags[
+ #define cpu_has_avx2          boot_cpu_has(X86_FEATURE_AVX2)
+ #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLUSH)
+ #define cpu_has_gbpages               boot_cpu_has(X86_FEATURE_GBPAGES)
+-#define cpu_has_pat           boot_cpu_has(X86_FEATURE_PAT)
+ #define cpu_has_x2apic                boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave         boot_cpu_has(X86_FEATURE_XSAVE)
+ #define cpu_has_xsaves                boot_cpu_has(X86_FEATURE_XSAVES)
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1730,7 +1730,7 @@ i915_gem_mmap_ioctl(struct drm_device *d
+       if (args->flags & ~(I915_MMAP_WC))
+               return -EINVAL;
+-      if (args->flags & I915_MMAP_WC && !cpu_has_pat)
++      if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+               return -ENODEV;
+       obj = drm_gem_object_lookup(dev, file, args->handle);
diff --git a/queue-4.4/x86-remove-unused-function-cpu_has_ht_siblings.patch b/queue-4.4/x86-remove-unused-function-cpu_has_ht_siblings.patch
new file mode 100644 (file)
index 0000000..68e31f7
--- /dev/null
@@ -0,0 +1,38 @@
+From ed29210cd6a67425026e78aa298fa434e11a74e3 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 17 Nov 2015 13:05:43 +0100
+Subject: x86: Remove unused function cpu_has_ht_siblings()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit ed29210cd6a67425026e78aa298fa434e11a74e3 upstream.
+
+It is used nowhere.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Link: http://lkml.kernel.org/r/1447761943-770-1-git-send-email-jgross@suse.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/smp.h |    9 ---------
+ 1 file changed, 9 deletions(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -21,15 +21,6 @@
+ extern int smp_num_siblings;
+ extern unsigned int num_processors;
+-static inline bool cpu_has_ht_siblings(void)
+-{
+-      bool has_siblings = false;
+-#ifdef CONFIG_SMP
+-      has_siblings = cpu_has_ht && smp_num_siblings > 1;
+-#endif
+-      return has_siblings;
+-}
+-
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */