git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Jan 2018 10:35:04 +0000 (11:35 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Jan 2018 10:35:04 +0000 (11:35 +0100)
added patches:
export_symbol-for-asm.patch
kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch
x86-asm-make-asm-alternative.h-safe-from-assembly.patch
x86-asm-use-register-variable-to-get-stack-pointer-value.patch
x86-cpu-amd-make-lfence-a-serializing-instruction.patch
x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch
x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch

queue-4.4/export_symbol-for-asm.patch [new file with mode: 0644]
queue-4.4/kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/x86-asm-make-asm-alternative.h-safe-from-assembly.patch [new file with mode: 0644]
queue-4.4/x86-asm-use-register-variable-to-get-stack-pointer-value.patch [new file with mode: 0644]
queue-4.4/x86-cpu-amd-make-lfence-a-serializing-instruction.patch [new file with mode: 0644]
queue-4.4/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch [new file with mode: 0644]
queue-4.4/x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch [new file with mode: 0644]
queue-4.4/x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch [new file with mode: 0644]

diff --git a/queue-4.4/export_symbol-for-asm.patch b/queue-4.4/export_symbol-for-asm.patch
new file mode 100644 (file)
index 0000000..ef295b8
--- /dev/null
@@ -0,0 +1,163 @@
+From 22823ab419d8ed884195cfa75483fd3a99bb1462 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Mon, 11 Jan 2016 10:54:54 -0500
+Subject: EXPORT_SYMBOL() for asm
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 22823ab419d8ed884195cfa75483fd3a99bb1462 upstream.
+
+Add asm-usable variants of EXPORT_SYMBOL/EXPORT_SYMBOL_GPL.  This
+commit just adds the default implementation; most of the architectures
+can simply add export.h to asm/Kbuild and start using <asm/export.h>
+from assembler.  The rest need to have their <asm/export.h> define
+several macros and then explicitly include <asm-generic/export.h>.
+
+One area where things might diverge from the default is alignment;
+normally it's 8 bytes on 64bit targets and 4 on 32bit ones, both for
+unsigned long and for struct kernel_symbol.  Unfortunately, amd64 and
+m68k are unusual - m68k aligns to 2 bytes (for both) and amd64 aligns
+struct kernel_symbol to 16 bytes.  For those we'll need asm/export.h to
+override the constants used by generic version - KSYM_ALIGN and KCRC_ALIGN
+for kernel_symbol and unsigned long resp.  And no, __alignof__ would not
+do the trick - on amd64 __alignof__ of struct kernel_symbol is 8, not 16.
+
+A more serious source of unpleasantness is the treatment of function
+descriptors on architectures that have them.  Things like ppc64,
+parisc, ia64, etc. need more than the address of the first insn to
+call an arbitrary function.  As a result, their representation of
+pointers to functions is not the typical "address of the entry point" -
+it's an address of a small static structure containing all the required
+information (including the entry point, of course).  Sadly, the asm-side
+conventions differ in what the function name refers to - entry point or
+the function descriptor.  On ppc64 we do the latter;
+       bar: .quad foo
+is what void (*bar)(void) = foo; turns into, and the rare places where
+we need to explicitly work with the label of the entry point are dealt
+with as DOTSYM(foo).  For our purposes it's ideal - generic macros are
+usable.  However, parisc uses foo for the label of the entry point and
+P%foo for the address of the function descriptor, and
+	bar: .long P%foo
+would be used instead.  ia64 is similar to parisc in that respect,
+except that there it's @fptr(foo) rather than P%foo.  Such architectures
+need to define KSYM_FUNC, which turns a function name into whatever
+is needed to refer to the function descriptor.
+
+What's more, on such architectures we need to know whether we are exporting
+a function or an object - in assembler we have to say so explicitly, to
+decide whether we want EXPORT_SYMBOL(foo) to produce e.g.
+       __ksymtab_foo: .quad foo
+or
+       __ksymtab_foo: .quad @fptr(foo)
+
+For that reason we introduce EXPORT_DATA_SYMBOL{,_GPL}(), to be used for
+exports of data objects.  On normal architectures it's the same thing
+as EXPORT_SYMBOL{,_GPL}(), but on parisc-like ones they differ and the
+right one needs to be used.  Most of the exports are functions, so we
+keep EXPORT_SYMBOL for those...
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/export.h |   94 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 94 insertions(+)
+
+--- /dev/null
++++ b/include/asm-generic/export.h
+@@ -0,0 +1,94 @@
++#ifndef __ASM_GENERIC_EXPORT_H
++#define __ASM_GENERIC_EXPORT_H
++
++#ifndef KSYM_FUNC
++#define KSYM_FUNC(x) x
++#endif
++#ifdef CONFIG_64BIT
++#define __put .quad
++#ifndef KSYM_ALIGN
++#define KSYM_ALIGN 8
++#endif
++#ifndef KCRC_ALIGN
++#define KCRC_ALIGN 8
++#endif
++#else
++#define __put .long
++#ifndef KSYM_ALIGN
++#define KSYM_ALIGN 4
++#endif
++#ifndef KCRC_ALIGN
++#define KCRC_ALIGN 4
++#endif
++#endif
++
++#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
++#define KSYM(name) _##name
++#else
++#define KSYM(name) name
++#endif
++
++/*
++ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
++ * since we immediately emit into those sections anyway.
++ */
++.macro ___EXPORT_SYMBOL name,val,sec
++#ifdef CONFIG_MODULES
++      .globl KSYM(__ksymtab_\name)
++      .section ___ksymtab\sec+\name,"a"
++      .balign KSYM_ALIGN
++KSYM(__ksymtab_\name):
++      __put \val, KSYM(__kstrtab_\name)
++      .previous
++      .section __ksymtab_strings,"a"
++KSYM(__kstrtab_\name):
++#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
++      .asciz "_\name"
++#else
++      .asciz "\name"
++#endif
++      .previous
++#ifdef CONFIG_MODVERSIONS
++      .section ___kcrctab\sec+\name,"a"
++      .balign KCRC_ALIGN
++KSYM(__kcrctab_\name):
++      __put KSYM(__crc_\name)
++      .weak KSYM(__crc_\name)
++      .previous
++#endif
++#endif
++.endm
++#undef __put
++
++#if defined(__KSYM_DEPS__)
++
++#define __EXPORT_SYMBOL(sym, val, sec)        === __KSYM_##sym ===
++
++#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
++
++#include <linux/kconfig.h>
++#include <generated/autoksyms.h>
++
++#define __EXPORT_SYMBOL(sym, val, sec)                                \
++      __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
++#define __cond_export_sym(sym, val, sec, conf)                        \
++      ___cond_export_sym(sym, val, sec, conf)
++#define ___cond_export_sym(sym, val, sec, enabled)            \
++      __cond_export_sym_##enabled(sym, val, sec)
++#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
++#define __cond_export_sym_0(sym, val, sec) /* nothing */
++
++#else
++#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
++#endif
++
++#define EXPORT_SYMBOL(name)                                   \
++      __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
++#define EXPORT_SYMBOL_GPL(name)                               \
++      __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
++#define EXPORT_DATA_SYMBOL(name)                              \
++      __EXPORT_SYMBOL(name, KSYM(name),)
++#define EXPORT_DATA_SYMBOL_GPL(name)                          \
++      __EXPORT_SYMBOL(name, KSYM(name),_gpl)
++
++#endif
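
Usage sketch (not part of the patch; file and symbol names are hypothetical): once an architecture points asm/export.h at this generic version, an assembly routine can be exported right next to its definition in the .S file:

    /* hypothetical arch/x86/lib/clear_foo.S */
    #include <linux/linkage.h>
    #include <asm/export.h>

    ENTRY(clear_foo)		/* hypothetical asm helper */
    	xorl %eax, %eax
    	ret
    ENDPROC(clear_foo)
    EXPORT_SYMBOL(clear_foo)	/* expands via the ___EXPORT_SYMBOL macro above */
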
diff --git a/queue-4.4/kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch b/queue-4.4/kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch
new file mode 100644 (file)
index 0000000..4fd0cac
--- /dev/null
@@ -0,0 +1,57 @@
+From 4f920843d248946545415c1bf6120942048708ed Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Tue, 14 Jun 2016 14:58:54 +0900
+Subject: kconfig.h: use __is_defined() to check if MODULE is defined
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit 4f920843d248946545415c1bf6120942048708ed upstream.
+
+The macro MODULE is not a config option; it is a per-file build
+option.  So, config_enabled(MODULE) is not sensible.  (There is
+another case in include/linux/export.h, where config_enabled() is
+used against a non-config option.)
+
+This commit renames some macros in include/linux/kconfig.h for the
+use for non-config macros and replaces config_enabled(MODULE) with
+__is_defined(MODULE).
+
+I am keeping config_enabled() because it is still referenced from
+some places, but I expect it will be deprecated in the future.
+
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Michal Marek <mmarek@suse.com>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/kconfig.h |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/include/linux/kconfig.h
++++ b/include/linux/kconfig.h
+@@ -17,10 +17,11 @@
+  * the last step cherry picks the 2nd arg, we get a zero.
+  */
+ #define __ARG_PLACEHOLDER_1 0,
+-#define config_enabled(cfg) _config_enabled(cfg)
+-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+-#define ___config_enabled(__ignored, val, ...) val
++#define config_enabled(cfg)           ___is_defined(cfg)
++#define __is_defined(x)                       ___is_defined(x)
++#define ___is_defined(val)            ____is_defined(__ARG_PLACEHOLDER_##val)
++#define ____is_defined(arg1_or_junk)  __take_second_arg(arg1_or_junk 1, 0)
++#define __take_second_arg(__ignored, val, ...) val
+ /*
+  * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+@@ -42,7 +43,7 @@
+  * built-in code when CONFIG_FOO is set to 'm'.
+  */
+ #define IS_REACHABLE(option) (config_enabled(option) || \
+-               (config_enabled(option##_MODULE) && config_enabled(MODULE)))
++               (config_enabled(option##_MODULE) && __is_defined(MODULE)))
+ /*
+  * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
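
To see why __is_defined(MODULE) does the right thing, here is the expansion traced by hand (a sketch; modular objects are compiled with -DMODULE, i.e. MODULE is defined to 1):

    /* object built with -DMODULE (MODULE expands to 1): */
    __is_defined(MODULE)
      -> ___is_defined(1)
      -> ____is_defined(__ARG_PLACEHOLDER_1)	/* placeholder expands to "0," */
      -> __take_second_arg(0, 1, 0)
      -> 1

    /* built-in object (MODULE not defined): */
    __is_defined(MODULE)
      -> ___is_defined(MODULE)
      -> ____is_defined(__ARG_PLACEHOLDER_MODULE)	/* no such placeholder macro */
      -> __take_second_arg(__ARG_PLACEHOLDER_MODULE 1, 0)
      -> 0
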
diff --git a/queue-4.4/series b/queue-4.4/series
index ea1655409dc2888e9aef70486067b7bae56a300a..56ee1318de536dd634d0f5fabf41db9f88c2c045 100644 (file)
@@ -1 +1,9 @@
 gcov-disable-for-compile_test.patch
+x86-cpu-amd-make-lfence-a-serializing-instruction.patch
+x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
+x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch
+x86-asm-use-register-variable-to-get-stack-pointer-value.patch
+x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch
+x86-asm-make-asm-alternative.h-safe-from-assembly.patch
+export_symbol-for-asm.patch
+kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch
diff --git a/queue-4.4/x86-asm-make-asm-alternative.h-safe-from-assembly.patch b/queue-4.4/x86-asm-make-asm-alternative.h-safe-from-assembly.patch
new file mode 100644 (file)
index 0000000..1a0e886
--- /dev/null
@@ -0,0 +1,48 @@
+From f005f5d860e0231fe212cfda8c1a3148b99609f4 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 26 Apr 2016 12:23:25 -0700
+Subject: x86/asm: Make asm/alternative.h safe from assembly
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit f005f5d860e0231fe212cfda8c1a3148b99609f4 upstream.
+
+asm/alternative.h isn't directly useful from assembly, but it
+shouldn't break the build.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/e5b693fcef99fe6e80341c9e97a002fb23871e91.1461698311.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/alternative.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_ALTERNATIVE_H
+ #define _ASM_X86_ALTERNATIVE_H
++#ifndef __ASSEMBLY__
++
+ #include <linux/types.h>
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
+@@ -271,4 +273,6 @@ extern void *text_poke(void *addr, const
+ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
++#endif /* __ASSEMBLY__ */
++
+ #endif /* _ASM_X86_ALTERNATIVE_H */
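
The guard pattern, as a general sketch (header and helper names are hypothetical): constants stay visible to both C and assembly, while C-only declarations hide behind __ASSEMBLY__, which kbuild defines when assembling .S files:

    #ifndef _ASM_X86_SOMEHDR_H			/* hypothetical header */
    #define _ASM_X86_SOMEHDR_H

    #define SOMEHDR_FLAG	0x1		/* usable from both C and .S files */

    #ifndef __ASSEMBLY__
    #include <linux/types.h>
    extern int somehdr_helper(u32 flags);	/* C-only; would be a syntax error in gas */
    #endif /* __ASSEMBLY__ */

    #endif /* _ASM_X86_SOMEHDR_H */
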
diff --git a/queue-4.4/x86-asm-use-register-variable-to-get-stack-pointer-value.patch b/queue-4.4/x86-asm-use-register-variable-to-get-stack-pointer-value.patch
new file mode 100644 (file)
index 0000000..fba18b7
--- /dev/null
@@ -0,0 +1,128 @@
+From 196bd485ee4f03ce4c690bfcf38138abfcd0a4bc Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Fri, 29 Sep 2017 17:15:36 +0300
+Subject: x86/asm: Use register variable to get stack pointer value
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 196bd485ee4f03ce4c690bfcf38138abfcd0a4bc upstream.
+
+Currently we use the current_stack_pointer() function to get the value
+of the stack pointer register. Since commit:
+
+  f5caf621ee35 ("x86/asm: Fix inline asm call constraints for Clang")
+
+... we have a stack register variable declared.  It can be used instead
+of the current_stack_pointer() function, which allows us to optimize
+away some excessive "mov %rsp, %<dst>" instructions:
+
+ -mov    %rsp,%rdx
+ -sub    %rdx,%rax
+ -cmp    $0x3fff,%rax
+ -ja     ffffffff810722fd <ist_begin_non_atomic+0x2d>
+
+ +sub    %rsp,%rax
+ +cmp    $0x3fff,%rax
+ +ja     ffffffff810722fa <ist_begin_non_atomic+0x2a>
+
+Remove current_stack_pointer(), rename __asm_call_sp to current_stack_pointer
+and use it instead of the removed function.
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170929141537.29167-1-aryabinin@virtuozzo.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[dwmw2: We want ASM_CALL_CONSTRAINT for retpoline]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/asm.h         |   11 +++++++++++
+ arch/x86/include/asm/thread_info.h |   11 -----------
+ arch/x86/kernel/irq_32.c           |    6 +++---
+ arch/x86/kernel/traps.c            |    2 +-
+ 4 files changed, 15 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -105,4 +105,15 @@
+ /* For C file, we already have NOKPROBE_SYMBOL macro */
+ #endif
++#ifndef __ASSEMBLY__
++/*
++ * This output constraint should be used for any inline asm which has a "call"
++ * instruction.  Otherwise the asm may be inserted before the frame pointer
++ * gets set up by the containing function.  If you forget to do this, objtool
++ * may print a "call without frame pointer save/setup" warning.
++ */
++register unsigned long current_stack_pointer asm(_ASM_SP);
++#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
++#endif
++
+ #endif /* _ASM_X86_ASM_H */
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -166,17 +166,6 @@ static inline struct thread_info *curren
+       return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
+ }
+-static inline unsigned long current_stack_pointer(void)
+-{
+-      unsigned long sp;
+-#ifdef CONFIG_X86_64
+-      asm("mov %%rsp,%0" : "=g" (sp));
+-#else
+-      asm("mov %%esp,%0" : "=g" (sp));
+-#endif
+-      return sp;
+-}
+-
+ #else /* !__ASSEMBLY__ */
+ #ifdef CONFIG_X86_64
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -65,7 +65,7 @@ static void call_on_stack(void *func, vo
+ static inline void *current_stack(void)
+ {
+-      return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
++      return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+ }
+ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+@@ -89,7 +89,7 @@ static inline int execute_on_irq_stack(i
+       /* Save the next esp at the bottom of the stack */
+       prev_esp = (u32 *)irqstk;
+-      *prev_esp = current_stack_pointer();
++      *prev_esp = current_stack_pointer;
+       if (unlikely(overflow))
+               call_on_stack(print_stack_overflow, isp);
+@@ -142,7 +142,7 @@ void do_softirq_own_stack(void)
+       /* Push the previous esp onto the stack */
+       prev_esp = (u32 *)irqstk;
+-      *prev_esp = current_stack_pointer();
++      *prev_esp = current_stack_pointer;
+       call_on_stack(__do_softirq, isp);
+ }
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -166,7 +166,7 @@ void ist_begin_non_atomic(struct pt_regs
+        * from double_fault.
+        */
+       BUG_ON((unsigned long)(current_top_of_stack() -
+-                             current_stack_pointer()) >= THREAD_SIZE);
++                             current_stack_pointer) >= THREAD_SIZE);
+       preempt_enable_no_resched();
+ }
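
A minimal sketch of how the new ASM_CALL_CONSTRAINT is meant to be used (the function and call target here are hypothetical): listing current_stack_pointer as an output operand ties the asm to the stack pointer, so the compiler cannot schedule the call before the frame pointer is set up:

    #include <asm/asm.h>

    static inline void sketch_call(void)
    {
    	asm volatile("call hypothetical_target"	/* hypothetical symbol */
    		     : ASM_CALL_CONSTRAINT	/* i.e. "+r" (current_stack_pointer) */
    		     :				/* no inputs */
    		     : "memory");
    }
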
diff --git a/queue-4.4/x86-cpu-amd-make-lfence-a-serializing-instruction.patch b/queue-4.4/x86-cpu-amd-make-lfence-a-serializing-instruction.patch
new file mode 100644 (file)
index 0000000..c805a1a
--- /dev/null
@@ -0,0 +1,66 @@
+From e4d0e84e490790798691aaa0f2e598637f1867ec Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:21 -0600
+Subject: x86/cpu/AMD: Make LFENCE a serializing instruction
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit e4d0e84e490790798691aaa0f2e598637f1867ec upstream.
+
+To aid in speculation control, make LFENCE a serializing instruction
+since it has less overhead than MFENCE.  This is done by setting bit 1
+of MSR 0xc0011029 (DE_CFG).  Some families that support LFENCE do not
+have this MSR.  For these families, the LFENCE instruction is already
+serializing.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/msr-index.h |    2 ++
+ arch/x86/kernel/cpu/amd.c        |   10 ++++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -330,6 +330,8 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK    0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT   20
+ #define MSR_FAM10H_NODE_ID            0xc001100c
++#define MSR_F10H_DECFG                        0xc0011029
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -746,6 +746,16 @@ static void init_amd(struct cpuinfo_x86
+               set_cpu_cap(c, X86_FEATURE_K8);
+       if (cpu_has_xmm2) {
++              /*
++               * A serializing LFENCE has less overhead than MFENCE, so
++               * use it for execution serialization.  On families which
++               * don't have that MSR, LFENCE is already serializing.
++               * msr_set_bit() uses the safe accessors, too, even if the MSR
++               * is not present.
++               */
++              msr_set_bit(MSR_F10H_DECFG,
++                          MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++
+               /* MFENCE stops RDTSC speculation */
+               set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+       }
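
For reference, a sketch of roughly what the msr_set_bit() call amounts to, written out with the safe accessors (which swallow the #GP fault on families that lack DE_CFG; BIT_ULL comes from <linux/bitops.h>):

    u64 val;

    if (!rdmsrl_safe(MSR_F10H_DECFG, &val) &&
        !(val & BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)))
    	wrmsrl_safe(MSR_F10H_DECFG,
    		    val | BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT));
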
diff --git a/queue-4.4/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch b/queue-4.4/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
new file mode 100644 (file)
index 0000000..e88fbc6
--- /dev/null
@@ -0,0 +1,80 @@
+From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:32 -0600
+Subject: x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f upstream.
+
+With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference
+to MFENCE_RDTSC.  However, since the kernel could be running under a
+hypervisor that does not support writing that MSR, read the MSR back and
+verify that the bit has been set successfully.  If the MSR can be read
+and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the
+MFENCE_RDTSC feature.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h |    1 +
+ arch/x86/kernel/cpu/amd.c        |   18 ++++++++++++++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -332,6 +332,7 @@
+ #define MSR_FAM10H_NODE_ID            0xc001100c
+ #define MSR_F10H_DECFG                        0xc0011029
+ #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE               BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -746,6 +746,9 @@ static void init_amd(struct cpuinfo_x86
+               set_cpu_cap(c, X86_FEATURE_K8);
+       if (cpu_has_xmm2) {
++              unsigned long long val;
++              int ret;
++
+               /*
+                * A serializing LFENCE has less overhead than MFENCE, so
+                * use it for execution serialization.  On families which
+@@ -756,8 +759,19 @@ static void init_amd(struct cpuinfo_x86
+               msr_set_bit(MSR_F10H_DECFG,
+                           MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+-              /* MFENCE stops RDTSC speculation */
+-              set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++              /*
++               * Verify that the MSR write was successful (could be running
++               * under a hypervisor) and only then assume that LFENCE is
++               * serializing.
++               */
++              ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
++              if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++                      /* A serializing LFENCE stops RDTSC speculation */
++                      set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
++              } else {
++                      /* MFENCE stops RDTSC speculation */
++                      set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++              }
+       }
+       /*
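
Downstream, the two feature bits choose the fence placed in front of RDTSC; a simplified sketch of the effect (the real rdtsc_ordered() in asm/msr.h patches the fence in with alternative_2() rather than branching, and the function name here is hypothetical):

    static __always_inline u64 rdtsc_ordered_sketch(void)
    {
    	if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
    		asm volatile("lfence" ::: "memory");	/* cheap, now serializing on AMD */
    	else
    		asm volatile("mfence" ::: "memory");	/* heavier fallback */
    	return rdtsc();
    }
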
diff --git a/queue-4.4/x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch b/queue-4.4/x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch
new file mode 100644 (file)
index 0000000..1860d92
--- /dev/null
@@ -0,0 +1,63 @@
+From 334bb773876403eae3457d81be0b8ea70f8e4ccc Mon Sep 17 00:00:00 2001
+From: Adam Borowski <kilobyte@angband.pl>
+Date: Sun, 11 Dec 2016 02:09:18 +0100
+Subject: x86/kbuild: enable modversions for symbols exported from asm
+
+From: Adam Borowski <kilobyte@angband.pl>
+
+commit 334bb773876403eae3457d81be0b8ea70f8e4ccc upstream.
+
+Commit 4efca4ed ("kbuild: modversions for EXPORT_SYMBOL() for asm") adds
+modversion support for symbols exported from asm files. Architectures
+must include C-style declarations for those symbols in asm/asm-prototypes.h
+in order for them to be versioned.
+
+Add these declarations for x86, and an architecture-independent file that
+can be used for common symbols.
+
+With f27c2f6 reverting 8ab2ae6 ("default exported asm symbols to zero"),
+we produce a scary warning on x86; this commit fixes that.
+
+Signed-off-by: Adam Borowski <kilobyte@angband.pl>
+Tested-by: Kalle Valo <kvalo@codeaurora.org>
+Acked-by: Nicholas Piggin <npiggin@gmail.com>
+Tested-by: Peter Wu <peter@lekensteyn.nl>
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Michal Marek <mmarek@suse.com>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/asm-prototypes.h |   16 ++++++++++++++++
+ include/asm-generic/asm-prototypes.h  |    7 +++++++
+ 2 files changed, 23 insertions(+)
+
+--- /dev/null
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -0,0 +1,16 @@
++#include <asm/ftrace.h>
++#include <asm/uaccess.h>
++#include <asm/string.h>
++#include <asm/page.h>
++#include <asm/checksum.h>
++
++#include <asm-generic/asm-prototypes.h>
++
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/special_insns.h>
++#include <asm/preempt.h>
++
++#ifndef CONFIG_X86_CMPXCHG64
++extern void cmpxchg8b_emu(void);
++#endif
+--- /dev/null
++++ b/include/asm-generic/asm-prototypes.h
+@@ -0,0 +1,7 @@
++#include <linux/bitops.h>
++extern void *__memset(void *, int, __kernel_size_t);
++extern void *__memcpy(void *, const void *, __kernel_size_t);
++extern void *__memmove(void *, const void *, __kernel_size_t);
++extern void *memset(void *, int, __kernel_size_t);
++extern void *memcpy(void *, const void *, __kernel_size_t);
++extern void *memmove(void *, const void *, __kernel_size_t);
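
A sketch of the two pieces an architecture supplies when exporting a hypothetical symbol from assembly: the export in the .S file, plus a C declaration in asm/asm-prototypes.h so genksyms can compute __crc_my_asm_helper for modversions:

    /* arch/.../lib/my_asm_helper.S (sketch) */
    #include <linux/linkage.h>
    #include <asm/export.h>

    ENTRY(my_asm_helper)		/* hypothetical symbol */
    	ret
    ENDPROC(my_asm_helper)
    EXPORT_SYMBOL(my_asm_helper)

    /* arch/.../include/asm/asm-prototypes.h (sketch) */
    #include <asm-generic/asm-prototypes.h>
    extern void my_asm_helper(void);
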
diff --git a/queue-4.4/x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch b/queue-4.4/x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch
new file mode 100644 (file)
index 0000000..ffd298b
--- /dev/null
@@ -0,0 +1,63 @@
+From b8b7abaed7a49b350f8ba659ddc264b04931d581 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 17 Sep 2017 09:03:50 -0700
+Subject: x86/mm/32: Move setup_clear_cpu_cap(X86_FEATURE_PCID) earlier
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit b8b7abaed7a49b350f8ba659ddc264b04931d581 upstream.
+
+Otherwise we might have the PCID feature bit set during cpu_init().
+
+This is just for robustness.  I haven't seen any actual bugs here.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: cba4671af755 ("x86/mm: Disable PCID on 32-bit kernels")
+Link: http://lkml.kernel.org/r/b16dae9d6b0db5d9801ddbebbfd83384097c61f3.1505663533.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c   |    8 --------
+ arch/x86/kernel/cpu/common.c |    8 ++++++++
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -22,14 +22,6 @@
+ void __init check_bugs(void)
+ {
+-#ifdef CONFIG_X86_32
+-      /*
+-       * Regardless of whether PCID is enumerated, the SDM says
+-       * that it can't be enabled in 32-bit mode.
+-       */
+-      setup_clear_cpu_cap(X86_FEATURE_PCID);
+-#endif
+-
+       identify_boot_cpu();
+       if (!IS_ENABLED(CONFIG_SMP)) {
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -838,6 +838,14 @@ static void __init early_identify_cpu(st
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+       fpu__init_system(c);
++
++#ifdef CONFIG_X86_32
++      /*
++       * Regardless of whether PCID is enumerated, the SDM says
++       * that it can't be enabled in 32-bit mode.
++       */
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++#endif
+ }
+ void __init early_cpu_init(void)