4.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Mar 2018 17:43:00 +0000 (18:43 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Mar 2018 17:43:00 +0000 (18:43 +0100)
added patches:
x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch
x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch
x86-paravirt-objtool-annotate-indirect-calls.patch

queue-4.15/series
queue-4.15/x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch [new file with mode: 0644]
queue-4.15/x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch [new file with mode: 0644]
queue-4.15/x86-paravirt-objtool-annotate-indirect-calls.patch [new file with mode: 0644]

diff --git a/queue-4.15/series b/queue-4.15/series
index f268a6c24a9e6460fb6475f021c30f2e9df1e696..9b2ffcc7da7b92cb06aa2b3a9f4d0700344776db 100644 (file)
--- a/queue-4.15/series
+++ b/queue-4.15/series
@@ -90,3 +90,6 @@ revert-x86-retpoline-simplify-vmexit_fill_rsb.patch
 x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch
 x86-retpoline-support-retpoline-builds-with-clang.patch
 x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch
+x86-paravirt-objtool-annotate-indirect-calls.patch
+x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch
+x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch
diff --git a/queue-4.15/x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch b/queue-4.15/x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch
new file mode 100644 (file)
index 0000000..1ace9e9
--- /dev/null
+++ b/queue-4.15/x86-boot-objtool-annotate-indirect-jump-in-secondary_startup_64.patch
@@ -0,0 +1,51 @@
+From bd89004f6305cbf7352238f61da093207ee518d6 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 16 Jan 2018 10:38:09 +0100
+Subject: x86/boot, objtool: Annotate indirect jump in secondary_startup_64()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit bd89004f6305cbf7352238f61da093207ee518d6 upstream.
+
+The objtool retpoline validation found this indirect jump. Seeing how
+it's on CPU bringup before we run userspace it should be safe, annotate
+it.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/head_64.S |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -23,6 +23,7 @@
+ #include <asm/nops.h>
+ #include "../entry/calling.h"
+ #include <asm/export.h>
++#include <asm/nospec-branch.h>
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
+       /* Ensure I am executing from virtual addresses */
+       movq    $1f, %rax
++      ANNOTATE_RETPOLINE_SAFE
+       jmp     *%rax
+ 1:
+       UNWIND_HINT_EMPTY
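All three backported patches rely on the same mechanism: ANNOTATE_RETPOLINE_SAFE marks one specific indirect call or jump so objtool's retpoline validation skips that site instead of warning about it. As a rough illustration, the stand-alone C sketch below mirrors that shape; the macro body and the .discard.retpoline_safe section name are modeled on the kernel's <asm/nospec-branch.h> rather than taken from this queue, and the file and function names are invented for the example.

/* annotate_sketch.c - hedged stand-alone illustration (x86-64) of the
 * annotation mechanism these patches rely on.  Build and inspect with:
 *   gcc -c annotate_sketch.c && objdump -h annotate_sketch.o
 * and look for the .discard.retpoline_safe section in the output.
 */

/* Record the address of the instruction that follows in a discardable ELF
 * section; a binary checker (objtool in the kernel build) reads that section
 * and suppresses its "indirect call/jmp without retpoline" warning for
 * exactly this site. */
#define ANNOTATE_RETPOLINE_SAFE                          \
        "999:\n\t"                                       \
        ".pushsection .discard.retpoline_safe\n\t"       \
        ".quad 999b\n\t"                                 \
        ".popsection\n\t"

void annotated_site(void)
{
        /* Stand-in for the annotated "jmp *%rax" / "call *%rax" sites in the
         * patches above: the nop's address is what lands in
         * .discard.retpoline_safe. */
        asm volatile (ANNOTATE_RETPOLINE_SAFE "nop");
}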
diff --git a/queue-4.15/x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch b/queue-4.15/x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch
new file mode 100644 (file)
index 0000000..e542724
--- /dev/null
+++ b/queue-4.15/x86-mm-sme-objtool-annotate-indirect-call-in-sme_encrypt_execute.patch
@@ -0,0 +1,51 @@
+From 531bb52a869a9c6e08c8d17ba955fcbfc18037ad Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 23 Jan 2018 16:18:50 +0100
+Subject: x86/mm/sme, objtool: Annotate indirect call in sme_encrypt_execute()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 531bb52a869a9c6e08c8d17ba955fcbfc18037ad upstream.
+
+This is boot code and thus Spectre-safe: we run this _way_ before userspace
+comes along to have a chance to poison our branch predictor.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/mem_encrypt_boot.S |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/mm/mem_encrypt_boot.S
++++ b/arch/x86/mm/mem_encrypt_boot.S
+@@ -15,6 +15,7 @@
+ #include <asm/page.h>
+ #include <asm/processor-flags.h>
+ #include <asm/msr-index.h>
++#include <asm/nospec-branch.h>
+       .text
+       .code64
+@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
+       movq    %rax, %r8               /* Workarea encryption routine */
+       addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
++      ANNOTATE_RETPOLINE_SAFE
+       call    *%rax                   /* Call the encryption routine */
+       pop     %r12
diff --git a/queue-4.15/x86-paravirt-objtool-annotate-indirect-calls.patch b/queue-4.15/x86-paravirt-objtool-annotate-indirect-calls.patch
new file mode 100644 (file)
index 0000000..6b58a83
--- /dev/null
+++ b/queue-4.15/x86-paravirt-objtool-annotate-indirect-calls.patch
@@ -0,0 +1,128 @@
+From 3010a0663fd949d122eca0561b06b0a9453f7866 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 17 Jan 2018 16:58:11 +0100
+Subject: x86/paravirt, objtool: Annotate indirect calls
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 3010a0663fd949d122eca0561b06b0a9453f7866 upstream.
+
+Paravirt emits indirect calls which get flagged by objtool retpoline
+checks, annotate it away because all these indirect calls will be
+patched out before we start userspace.
+
+This patching happens through alternative_instructions() ->
+apply_paravirt() -> pv_init_ops.patch() which will eventually end up
+in paravirt_patch_default(). This function _will_ write direct
+alternatives.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/paravirt.h       |   17 +++++++++++++----
+ arch/x86/include/asm/paravirt_types.h |    5 ++++-
+ 2 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/pgtable_types.h>
+ #include <asm/asm.h>
++#include <asm/nospec-branch.h>
+ #include <asm/paravirt_types.h>
+@@ -879,23 +880,27 @@ extern void default_banner(void);
+ #define INTERRUPT_RETURN                                              \
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
+-                jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
++                ANNOTATE_RETPOLINE_SAFE;                                      \
++                jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+ #define DISABLE_INTERRUPTS(clobbers)                                  \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
++                ANNOTATE_RETPOLINE_SAFE;                                      \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+ #define ENABLE_INTERRUPTS(clobbers)                                   \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
++                ANNOTATE_RETPOLINE_SAFE;                                      \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+ #ifdef CONFIG_X86_32
+ #define GET_CR0_INTO_EAX                              \
+       push %ecx; push %edx;                           \
++      ANNOTATE_RETPOLINE_SAFE;                                \
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+       pop %edx; pop %ecx
+ #else /* !CONFIG_X86_32 */
+@@ -917,21 +922,25 @@ extern void default_banner(void);
+  */
+ #define SWAPGS                                                                \
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
+-                call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
++                ANNOTATE_RETPOLINE_SAFE;                                      \
++                call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
+                )
+ #define GET_CR2_INTO_RAX                              \
+-      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
++      ANNOTATE_RETPOLINE_SAFE;                                \
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
+ #define USERGS_SYSRET64                                                       \
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
+                 CLBR_NONE,                                            \
+-                jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
++                ANNOTATE_RETPOLINE_SAFE;                                      \
++                jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
+ #ifdef CONFIG_DEBUG_ENTRY
+ #define SAVE_FLAGS(clobbers)                                        \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
++                ANNOTATE_RETPOLINE_SAFE;                                  \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+ #endif
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -43,6 +43,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/kmap_types.h>
+ #include <asm/pgtable_types.h>
++#include <asm/nospec-branch.h>
+ struct page;
+ struct thread_struct;
+@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
+  * offset into the paravirt_patch_template structure, and can therefore be
+  * freely converted back into a structure offset.
+  */
+-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
++#define PARAVIRT_CALL                                 \
++      ANNOTATE_RETPOLINE_SAFE                         \
++      "call *%c[paravirt_opptr];"
+ /*
+  * These macros are intended to wrap calls through one of the paravirt
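The paravirt commit message above explains why whitelisting is correct here: these PARA_SITE/PARAVIRT_CALL indirect calls are rewritten into direct code by apply_paravirt() before userspace can poison the branch predictor. What the whole series ultimately satisfies is objtool's retpoline check; the sketch below is a simplified, hedged model of that decision, not objtool's actual code (struct and function names are invented): an indirect branch only escapes a warning if its address appears among the ANNOTATE_RETPOLINE_SAFE records.

/* retpoline_check_sketch.c - hedged, simplified model of the objtool check
 * this series is satisfying; not objtool's real implementation. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct insn {
        unsigned long offset;     /* instruction address within .text */
        bool indirect_branch;     /* "call *reg/mem" or "jmp *reg/mem" */
};

/* Addresses collected from .discard.retpoline_safe relocations. */
static bool retpoline_safe(const unsigned long *safe, size_t n,
                           unsigned long offset)
{
        for (size_t i = 0; i < n; i++)
                if (safe[i] == offset)
                        return true;
        return false;
}

static void check(const struct insn *insns, size_t n_insns,
                  const unsigned long *safe, size_t n_safe)
{
        for (size_t i = 0; i < n_insns; i++) {
                if (!insns[i].indirect_branch)
                        continue;
                if (retpoline_safe(safe, n_safe, insns[i].offset))
                        continue;       /* annotated boot/paravirt site */
                printf("warning: indirect branch at 0x%lx without retpoline\n",
                       insns[i].offset);
        }
}

int main(void)
{
        /* One annotated site (like the secondary_startup_64 jmp) and one
         * unannotated indirect call, which would still warn. */
        const struct insn insns[] = {
                { 0x10, true },
                { 0x40, true },
        };
        const unsigned long safe[] = { 0x10 };

        check(insns, sizeof(insns) / sizeof(insns[0]),
              safe, sizeof(safe) / sizeof(safe[0]));
        return 0;
}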