--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:28 +0200
+Subject: s390: add assembler macros for CPU alternatives
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-2-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit fba9eb7946251d6e420df3bdf7bc45195be7be9a ]
+
+Add a header with macros usable in assembler files to emit alternative
+code sequences. It works analogously to the alternatives for inline
+assemblies in C files, with the same restrictions and capabilities.
+The syntax is
+
+ ALTERNATIVE "<default instruction sequence>", \
+	"<alternative instruction sequence>", \
+	"<feature-bit>"
+
+and
+
+ ALTERNATIVE_2 "<default instruction sequence>", \
+	"<alternative instruction sequence #1>", \
+	"<feature-bit #1>", \
+	"<alternative instruction sequence #2>", \
+	"<feature-bit #2>"
+
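+A minimal usage sketch (the instruction pair and the facility bit below
+are illustrative only, not taken from an in-tree user): an assembler
+file includes the header and writes, e.g.,
+
+	#include <asm/alternative-asm.h>
+
+	# default: load zero via lghi; alternative: clear the register with
+	# xgr, patched in by apply_alternatives() if facility bit 82 is set
+	ALTERNATIVE "lghi %r2,0", "xgr %r2,%r2", 82
+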
+Reviewed-by: Vasily Gorbik <gor@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/alternative-asm.h | 108 ++++++++++++++++++++++++++++++++
+ 1 file changed, 108 insertions(+)
+ create mode 100644 arch/s390/include/asm/alternative-asm.h
+
+--- /dev/null
++++ b/arch/s390/include/asm/alternative-asm.h
+@@ -0,0 +1,108 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_ALTERNATIVE_ASM_H
++#define _ASM_S390_ALTERNATIVE_ASM_H
++
++#ifdef __ASSEMBLY__
++
++/*
++ * Check the length of an instruction sequence. The length may not be larger
++ * than 254 bytes and it has to be divisible by 2.
++ */
++.macro alt_len_check start,end
++ .if ( \end - \start ) > 254
++ .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
++ .endif
++ .if ( \end - \start ) % 2
++ .error "cpu alternatives instructions length is odd\n"
++ .endif
++.endm
++
++/*
++ * Issue one struct alt_instr descriptor entry (need to put it into
++ * the section .altinstructions, see below). This entry contains
++ * enough information for the alternatives patching code to patch an
++ * instruction. See apply_alternatives().
++ */
++.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
++ .long \orig_start - .
++ .long \alt_start - .
++ .word \feature
++ .byte \orig_end - \orig_start
++ .byte \alt_end - \alt_start
++.endm
++
++/*
++ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
++ * for the bulk of the area, possibly followed by a 4-byte and/or
++ * a 2-byte nop if the size of the area is not divisible by 6.
++ */
++.macro alt_pad_fill bytes
++ .fill ( \bytes ) / 6, 6, 0xc0040000
++ .fill ( \bytes ) % 6 / 4, 4, 0x47000000
++ .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
++.endm
++
++/*
++ * Fill up @bytes with nops. If the number of bytes is larger
++ * than 6, emit a jg instruction to branch over all nops, then
++ * fill an area of size (@bytes - 6) with nop instructions.
++ */
++.macro alt_pad bytes
++ .if ( \bytes > 0 )
++ .if ( \bytes > 6 )
++ jg . + \bytes
++ alt_pad_fill \bytes - 6
++ .else
++ alt_pad_fill \bytes
++ .endif
++ .endif
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE oldinstr, newinstr, feature
++ .pushsection .altinstr_replacement,"ax"
++770: \newinstr
++771: .popsection
++772: \oldinstr
++773: alt_len_check 770b, 771b
++ alt_len_check 772b, 773b
++ alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
++774: .pushsection .altinstructions,"a"
++ alt_entry 772b, 774b, 770b, 771b, \feature
++ .popsection
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
++ .pushsection .altinstr_replacement,"ax"
++770: \newinstr1
++771: \newinstr2
++772: .popsection
++773: \oldinstr
++774: alt_len_check 770b, 771b
++ alt_len_check 771b, 772b
++ alt_len_check 773b, 774b
++ .if ( 771b - 770b > 772b - 771b )
++ alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
++ .else
++ alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
++ .endif
++775: .pushsection .altinstructions,"a"
++ alt_entry 773b, 775b, 770b, 771b,\feature1
++ alt_entry 773b, 775b, 771b, 772b,\feature2
++ .popsection
++.endm
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:35 +0200
+Subject: s390: extend expoline to BC instructions
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-9-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit 6deaa3bbca804b2a3627fd685f75de64da7be535 ]
+
+The BPF JIT uses a 'b <disp>(%r<x>)' instruction in the definition
+of the sk_load_word and sk_load_half functions.
+
+Add support for branch-on-condition instructions contained in the
+thunk code of an expoline.
+
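+The displacement is preserved in the thunk, so with these macros the
+BPF JIT stubs can route 'b OFF_OK(%r6)' through an expoline as well.
+A usage sketch of the pattern (applied to bpf_jit.S by a later patch
+in this series; OFF_OK is the constant already used by those stubs):
+
+	#include <asm/nospec-insn.h>
+
+	GEN_B_THUNK OFF_OK,%r6		# emit the thunk body once
+
+	B_EX	OFF_OK,%r6		# replaces "b OFF_OK(%r6)" when
+					# CONFIG_EXPOLINE is enabled
+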
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/nospec-insn.h | 57 ++++++++++++++++++++++++++++++++++++
+ arch/s390/kernel/nospec-branch.c | 25 ++++++++++++---
+ 2 files changed, 77 insertions(+), 5 deletions(-)
+
+--- a/arch/s390/include/asm/nospec-insn.h
++++ b/arch/s390/include/asm/nospec-insn.h
+@@ -32,10 +32,18 @@
+ __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+ .endm
+
++ .macro __THUNK_PROLOG_BC d0,r1,r2
++ __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
++ .endm
++
+ .macro __THUNK_BR r1,r2
+ jg __s390x_indirect_jump_r\r2\()use_r\r1
+ .endm
+
++ .macro __THUNK_BC d0,r1,r2
++ jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
++ .endm
++
+ .macro __THUNK_BRASL r1,r2,r3
+ brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+ .endm
+@@ -78,6 +86,23 @@
+ .endif
+ .endm
+
++ .macro __DECODE_DRR expand,disp,reg,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \reg,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r2
++ \expand \disp,\r1,\r2
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_DRR failed"
++ .endif
++ .endm
++
+ .macro __THUNK_EX_BR reg,ruse
+ # Be very careful when adding instructions to this macro!
+ # The ALTERNATIVE replacement code has a .+10 which targets
+@@ -98,12 +123,30 @@
+ 555: br \reg
+ .endm
+
++ .macro __THUNK_EX_BC disp,reg,ruse
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++ exrl 0,556f
++ j .
++#else
++ larl \ruse,556f
++ ex 0,0(\ruse)
++ j .
++#endif
++556: b \disp(\reg)
++ .endm
++
+ .macro GEN_BR_THUNK reg,ruse=%r1
+ __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+ __THUNK_EX_BR \reg,\ruse
+ __THUNK_EPILOG
+ .endm
+
++ .macro GEN_B_THUNK disp,reg,ruse=%r1
++ __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
++ __THUNK_EX_BC \disp,\reg,\ruse
++ __THUNK_EPILOG
++ .endm
++
+ .macro BR_EX reg,ruse=%r1
+ 557: __DECODE_RR __THUNK_BR,\reg,\ruse
+ .pushsection .s390_indirect_branches,"a",@progbits
+@@ -111,6 +154,13 @@
+ .popsection
+ .endm
+
++ .macro B_EX disp,reg,ruse=%r1
++558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 558b-.
++ .popsection
++ .endm
++
+ .macro BASR_EX rsave,rtarget,ruse=%r1
+ 559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+ .pushsection .s390_indirect_branches,"a",@progbits
+@@ -122,10 +172,17 @@
+ .macro GEN_BR_THUNK reg,ruse=%r1
+ .endm
+
++ .macro GEN_B_THUNK disp,reg,ruse=%r1
++ .endm
++
+ .macro BR_EX reg,ruse=%r1
+ br \reg
+ .endm
+
++ .macro B_EX disp,reg,ruse=%r1
++ b \disp(\reg)
++ .endm
++
+ .macro BASR_EX rsave,rtarget,ruse=%r1
+ basr \rsave,\rtarget
+ .endm
+--- a/arch/s390/kernel/nospec-branch.c
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -94,7 +94,6 @@ static void __init_or_module __nospec_re
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+- memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+@@ -115,18 +114,34 @@ static void __init_or_module __nospec_re
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+- if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++ /* Check for unconditional branch 0x07f? or 0x47f???? */
++ if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
++
++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ switch (type) {
+ case BRCL_EXPOLINE:
+- /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ if (br[0] == 0x47) {
++ /* brcl to b, replace with bc + nopr */
++ insnbuf[2] = br[2];
++ insnbuf[3] = br[3];
++ } else {
++ /* brcl to br, replace with bcr + nop */
++ }
+ break;
+ case BRASL_EXPOLINE:
+- /* brasl to thunk, replace with basr + nop */
+- insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ if (br[0] == 0x47) {
++ /* brasl to b, replace with bas + nopr */
++ insnbuf[0] = 0x4d;
++ insnbuf[2] = br[2];
++ insnbuf[3] = br[3];
++ } else {
++ /* brasl to br, replace with basr + nop */
++ insnbuf[0] = 0x0d;
++ }
+ break;
+ }
+
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:31 +0200
+Subject: s390/ftrace: use expoline for indirect branches
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-5-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit 23a4d7fd34856da8218c4cfc23dba7a6ec0a423a ]
+
+The returns from the ftrace_stub, _mcount, ftrace_caller and
+return_to_handler functions are done with "br %r14" and "br %r1".
+These are indirect branches as well and need to use execute
+trampolines for CONFIG_EXPOLINE=y.
+
+The ftrace_caller function is a special case as it returns to the
+start of a function and may only use %r0 and %r1. On a pre-z10
+machine the standard execute trampoline uses LARL + EX to do
+this, but that requires *two* registers in the range %r1..%r15.
+To get around this, the 'br %r1' located in the lowcore is used;
+the EX instruction then does not need an address register.
+The lowcore trick may only be used on pre-z14 machines, though:
+with noexec=on the mapping for the first page may not contain
+instructions. The solution is an ALTERNATIVE in the expoline
+thunk generated by 'GEN_BR_THUNK %r1' that switches to EXRL;
+this relies on the fact that a machine that supports noexec=on
+has EXRL as well.
+
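+On a machine with the execute-extensions facility (bit 35) the thunk
+emitted by 'GEN_BR_THUNK %r1' thus ends up in roughly the following
+shape (a sketch; a local label stands in for the '.+10' displacement
+used by the ALTERNATIVE below):
+
+__s390x_indirect_jump_r1use_r1:
+	exrl	0,555f		# execute the "br %r1" below, out of line
+	j	.		# trap speculative execution
+555:	br	%r1
+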
+Cc: stable@vger.kernel.org # 4.16
+Fixes: f19fbd5ed6 ("s390: introduce execute-trampolines for branches")
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/nospec-insn.h | 11 +++++++++++
+ arch/s390/kernel/asm-offsets.c | 1 +
+ arch/s390/kernel/mcount.S | 14 +++++++++-----
+ 3 files changed, 21 insertions(+), 5 deletions(-)
+
+--- a/arch/s390/include/asm/nospec-insn.h
++++ b/arch/s390/include/asm/nospec-insn.h
+@@ -2,6 +2,9 @@
+ #ifndef _ASM_S390_NOSPEC_ASM_H
+ #define _ASM_S390_NOSPEC_ASM_H
+
++#include <asm/alternative-asm.h>
++#include <asm/asm-offsets.h>
++
+ #ifdef __ASSEMBLY__
+
+ #ifdef CONFIG_EXPOLINE
+@@ -76,13 +79,21 @@
+ .endm
+
+ .macro __THUNK_EX_BR reg,ruse
++ # Be very careful when adding instructions to this macro!
++ # The ALTERNATIVE replacement code has a .+10 which targets
++ # the "br \reg" after the code has been patched.
+ #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,555f
+ j .
+ #else
++ .ifc \reg,%r1
++ ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
++ j .
++ .else
+ larl \ruse,555f
+ ex 0,0(\ruse)
+ j .
++ .endif
+ #endif
+ 555: br \reg
+ .endm
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -170,6 +170,7 @@ int main(void)
+ OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
+ OFFSET(__LC_GMAP, _lowcore, gmap);
+ OFFSET(__LC_PASTE, _lowcore, paste);
++ OFFSET(__LC_BR_R1, _lowcore, br_r1_trampoline);
+ /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+ OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
+ /* hardware defined lowcore locations 0x1000 - 0x18ff */
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -8,12 +8,16 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/ftrace.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+
++ GEN_BR_THUNK %r1
++ GEN_BR_THUNK %r14
++
+ .section .kprobes.text, "ax"
+
+ ENTRY(ftrace_stub)
+- br %r14
++ BR_EX %r14
+
+ #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
+ #define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+@@ -21,7 +25,7 @@ ENTRY(ftrace_stub)
+ #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+
+ ENTRY(_mcount)
+- br %r14
++ BR_EX %r14
+
+ ENTRY(ftrace_caller)
+ .globl ftrace_regs_caller
+@@ -49,7 +53,7 @@ ENTRY(ftrace_caller)
+ #endif
+ lgr %r3,%r14
+ la %r5,STACK_PTREGS(%r15)
+- basr %r14,%r1
++ BASR_EX %r14,%r1
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ # The j instruction gets runtime patched to a nop instruction.
+ # See ftrace_enable_ftrace_graph_caller.
+@@ -64,7 +68,7 @@ ftrace_graph_caller_end:
+ #endif
+ lg %r1,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+- br %r1
++ BR_EX %r1
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+@@ -77,6 +81,6 @@ ENTRY(return_to_handler)
+ aghi %r15,STACK_FRAME_OVERHEAD
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+- br %r14
++ BR_EX %r14
+
+ #endif
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:32 +0200
+Subject: s390/kernel: use expoline for indirect branches
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-6-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit c50c84c3ac4d5db683904bdb3257798b6ef980ae ]
+
+The assembler code in arch/s390/kernel uses a few more indirect branches
+which need to be done with execute trampolines for CONFIG_EXPOLINE=y.
+
+Cc: stable@vger.kernel.org # 4.16
+Fixes: f19fbd5ed6 ("s390: introduce execute-trampolines for branches")
+Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/base.S | 24 ++++++++++++++----------
+ arch/s390/kernel/reipl.S | 5 ++++-
+ arch/s390/kernel/swsusp.S | 10 ++++++----
+ 3 files changed, 24 insertions(+), 15 deletions(-)
+
+--- a/arch/s390/kernel/base.S
++++ b/arch/s390/kernel/base.S
+@@ -8,18 +8,22 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+ #include <asm/sigp.h>
+
++ GEN_BR_THUNK %r9
++ GEN_BR_THUNK %r14
++
+ ENTRY(s390_base_mcck_handler)
+ basr %r13,0
+ 0: lg %r15,__LC_PANIC_STACK # load panic stack
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_mcck_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ 1: la %r1,4095
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+ lpswe __LC_MCK_OLD_PSW
+@@ -36,10 +40,10 @@ ENTRY(s390_base_ext_handler)
+ basr %r13,0
+ 0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_ext_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
+ ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
+ lpswe __LC_EXT_OLD_PSW
+@@ -56,10 +60,10 @@ ENTRY(s390_base_pgm_handler)
+ basr %r13,0
+ 0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_pgm_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ lmg %r0,%r15,__LC_SAVE_AREA_SYNC
+ lpswe __LC_PGM_OLD_PSW
+ 1: lpswe disabled_wait_psw-0b(%r13)
+@@ -116,7 +120,7 @@ ENTRY(diag308_reset)
+ larl %r4,.Lcontinue_psw # Restore PSW flags
+ lpswe 0(%r4)
+ .Lcontinue:
+- br %r14
++ BR_EX %r14
+ .align 16
+ .Lrestart_psw:
+ .long 0x00080000,0x80000000 + .Lrestart_part2
+--- a/arch/s390/kernel/reipl.S
++++ b/arch/s390/kernel/reipl.S
+@@ -6,8 +6,11 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+
++ GEN_BR_THUNK %r14
++
+ #
+ # store_status
+ #
+@@ -62,7 +65,7 @@ ENTRY(store_status)
+ st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+ larl %r2,store_status
+ stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+- br %r14
++ BR_EX %r14
+
+ .section .bss
+ .align 8
+--- a/arch/s390/kernel/swsusp.S
++++ b/arch/s390/kernel/swsusp.S
+@@ -12,6 +12,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+
+ /*
+@@ -23,6 +24,8 @@
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
++ GEN_BR_THUNK %r14
++
+ .section .text
+ ENTRY(swsusp_arch_suspend)
+ stmg %r6,%r15,__SF_GPRS(%r15)
+@@ -102,7 +105,7 @@ ENTRY(swsusp_arch_suspend)
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+- br %r14
++ BR_EX %r14
+
+ /*
+ * Restore saved memory image to correct place and restore register context.
+@@ -196,11 +199,10 @@ pgm_check_entry:
+ larl %r15,init_thread_union
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+ larl %r2,.Lpanic_string
+- larl %r3,_sclp_print_early
+ lghi %r1,0
+ sam31
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
+- basr %r14,%r3
++ brasl %r14,_sclp_print_early
+ larl %r3,.Ldisabled_wait_31
+ lpsw 0(%r3)
+ 4:
+@@ -266,7 +268,7 @@ restore_registers:
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+- br %r14
++ BR_EX %r14
+
+ .section .data..nosave,"aw",@progbits
+ .align 8
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:30 +0200
+Subject: s390/lib: use expoline for indirect branches
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-4-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit 97489e0663fa700d6e7febddc43b58df98d7bcda ]
+
+The returns from the memmove, memset, memcpy, __memset16, __memset32 and
+__memset64 functions are done with "br %r14". These are indirect branches
+as well and need to use execute trampolines for CONFIG_EXPOLINE=y.
+
+Cc: stable@vger.kernel.org # 4.16
+Fixes: f19fbd5ed6 ("s390: introduce execute-trampolines for branches")
+Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/lib/mem.S | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/lib/mem.S
++++ b/arch/s390/lib/mem.S
+@@ -5,6 +5,9 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
++
++ GEN_BR_THUNK %r14
+
+ /*
+ * memset implementation
+@@ -38,7 +41,7 @@ ENTRY(memset)
+ .Lmemset_clear_rest:
+ larl %r3,.Lmemset_xc
+ ex %r4,0(%r3)
+- br %r14
++ BR_EX %r14
+ .Lmemset_fill:
+ stc %r3,0(%r2)
+ cghi %r4,1
+@@ -55,7 +58,7 @@ ENTRY(memset)
+ .Lmemset_fill_rest:
+ larl %r3,.Lmemset_mvc
+ ex %r4,0(%r3)
+- br %r14
++ BR_EX %r14
+ .Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+ .Lmemset_mvc:
+@@ -77,7 +80,7 @@ ENTRY(memcpy)
+ .Lmemcpy_rest:
+ larl %r5,.Lmemcpy_mvc
+ ex %r4,0(%r5)
+- br %r14
++ BR_EX %r14
+ .Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:29 +0200
+Subject: s390: move expoline assembler macros to a header
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-3-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit 6dd85fbb87d1d6b87a3b1f02ca28d7b2abd2e7ba ]
+
+To be able to use the expoline branches in different assembler
+files, move the associated macros from entry.S to a new header,
+nospec-insn.h.
+
+While we are at it, make the macros a bit nicer to use.
+
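+Concretely, the macros are now keyed by the registers involved rather
+than by a thunk name. A usage sketch of the new interface (the pattern
+used by the entry.S conversion below):
+
+	#include <asm/nospec-insn.h>
+
+	GEN_BR_THUNK %r9		# emit the needed thunks once
+	GEN_BR_THUNK %r14
+
+	BR_EX	%r14			# jump via the thunk if CONFIG_EXPOLINE=y,
+					# otherwise a plain "br %r14"
+	BASR_EX	%r14,%r9		# likewise for "basr %r14,%r9"
+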
+Cc: stable@vger.kernel.org # 4.16
+Fixes: f19fbd5ed6 ("s390: introduce execute-trampolines for branches")
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/nospec-insn.h | 125 ++++++++++++++++++++++++++++++++++++
+ arch/s390/kernel/entry.S | 105 ++++++------------------------
+ 2 files changed, 149 insertions(+), 81 deletions(-)
+ create mode 100644 arch/s390/include/asm/nospec-insn.h
+
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-insn.h
+@@ -0,0 +1,125 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_NOSPEC_ASM_H
++#define _ASM_S390_NOSPEC_ASM_H
++
++#ifdef __ASSEMBLY__
++
++#ifdef CONFIG_EXPOLINE
++
++/*
++ * The expoline macros are used to create thunks in the same format
++ * as gcc generates them. The 'comdat' section flag makes sure that
++ * the various thunks are merged into a single copy.
++ */
++ .macro __THUNK_PROLOG_NAME name
++ .pushsection .text.\name,"axG",@progbits,\name,comdat
++ .globl \name
++ .hidden \name
++ .type \name,@function
++\name:
++ .cfi_startproc
++ .endm
++
++ .macro __THUNK_EPILOG
++ .cfi_endproc
++ .popsection
++ .endm
++
++ .macro __THUNK_PROLOG_BR r1,r2
++ __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
++ .endm
++
++ .macro __THUNK_BR r1,r2
++ jg __s390x_indirect_jump_r\r2\()use_r\r1
++ .endm
++
++ .macro __THUNK_BRASL r1,r2,r3
++ brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
++ .endm
++
++ .macro __DECODE_RR expand,reg,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \reg,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r2
++ \expand \r1,\r2
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_RR failed"
++ .endif
++ .endm
++
++ .macro __DECODE_RRR expand,rsave,rtarget,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \rsave,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \rtarget,%r\r2
++ .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r3
++ \expand \r1,\r2,\r3
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_RRR failed"
++ .endif
++ .endm
++
++ .macro __THUNK_EX_BR reg,ruse
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++ exrl 0,555f
++ j .
++#else
++ larl \ruse,555f
++ ex 0,0(\ruse)
++ j .
++#endif
++555: br \reg
++ .endm
++
++ .macro GEN_BR_THUNK reg,ruse=%r1
++ __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
++ __THUNK_EX_BR \reg,\ruse
++ __THUNK_EPILOG
++ .endm
++
++ .macro BR_EX reg,ruse=%r1
++557: __DECODE_RR __THUNK_BR,\reg,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 557b-.
++ .popsection
++ .endm
++
++ .macro BASR_EX rsave,rtarget,ruse=%r1
++559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 559b-.
++ .popsection
++ .endm
++
++#else
++ .macro GEN_BR_THUNK reg,ruse=%r1
++ .endm
++
++ .macro BR_EX reg,ruse=%r1
++ br \reg
++ .endm
++
++ .macro BASR_EX rsave,rtarget,ruse=%r1
++ basr \rsave,\rtarget
++ .endm
++#endif
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_NOSPEC_ASM_H */
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -23,6 +23,7 @@
+ #include <asm/vx-insn.h>
+ #include <asm/setup.h>
+ #include <asm/nmi.h>
++#include <asm/nospec-insn.h>
+
+ __PT_R0 = __PT_GPRS
+ __PT_R1 = __PT_GPRS + 8
+@@ -225,74 +226,16 @@ _PIF_WORK = (_PIF_PER_TRAP)
+ .popsection
+ .endm
+
+-#ifdef CONFIG_EXPOLINE
+-
+- .macro GEN_BR_THUNK name,reg,tmp
+- .section .text.\name,"axG",@progbits,\name,comdat
+- .globl \name
+- .hidden \name
+- .type \name,@function
+-\name:
+- .cfi_startproc
+-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+- exrl 0,0f
+-#else
+- larl \tmp,0f
+- ex 0,0(\tmp)
+-#endif
+- j .
+-0: br \reg
+- .cfi_endproc
+- .endm
+-
+- GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+- GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+- GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+-
+- .macro BASR_R14_R9
+-0: brasl %r14,__s390x_indirect_jump_r1use_r9
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+- .macro BR_R1USE_R14
+-0: jg __s390x_indirect_jump_r1use_r14
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+- .macro BR_R11USE_R14
+-0: jg __s390x_indirect_jump_r11use_r14
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+-#else /* CONFIG_EXPOLINE */
+-
+- .macro BASR_R14_R9
+- basr %r14,%r9
+- .endm
+-
+- .macro BR_R1USE_R14
+- br %r14
+- .endm
+-
+- .macro BR_R11USE_R14
+- br %r14
+- .endm
+-
+-#endif /* CONFIG_EXPOLINE */
+-
++ GEN_BR_THUNK %r9
++ GEN_BR_THUNK %r14
++ GEN_BR_THUNK %r14,%r11
+
+ .section .kprobes.text, "ax"
+
+ ENTRY(__bpon)
+ .globl __bpon
+ BPON
+- BR_R1USE_R14
++ BR_EX %r14
+
+ /*
+ * Scheduler resume function, called by switch_to
+@@ -322,7 +265,7 @@ ENTRY(__switch_to)
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+ jz 0f
+ .insn s,0xb2800000,__LC_LPP # set program parameter
+-0: BR_R1USE_R14
++0: BR_EX %r14
+
+ .L__critical_start:
+
+@@ -388,7 +331,7 @@ sie_exit:
+ xgr %r5,%r5
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
+@@ -445,7 +388,7 @@ ENTRY(system_call)
+ lgf %r9,0(%r8,%r10) # get system call add.
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+ jnz .Lsysc_tracesys
+- BASR_R14_R9 # call sys_xxxx
++ BASR_EX %r14,%r9 # call sys_xxxx
+ stg %r2,__PT_R2(%r11) # store return value
+
+ .Lsysc_return:
+@@ -585,7 +528,7 @@ ENTRY(system_call)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
+- BASR_R14_R9 # call sys_xxx
++ BASR_EX %r14,%r9 # call sys_xxx
+ stg %r2,__PT_R2(%r11) # store return value
+ .Lsysc_tracenogo:
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+@@ -609,7 +552,7 @@ ENTRY(ret_from_fork)
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
+ ENTRY(kernel_thread_starter)
+ la %r2,0(%r10)
+- BASR_R14_R9
++ BASR_EX %r14,%r9
+ j .Lsysc_tracenogo
+
+ /*
+@@ -685,7 +628,7 @@ ENTRY(pgm_check_handler)
+ je .Lpgm_return
+ lgf %r9,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
+- BASR_R14_R9 # branch to interrupt-handler
++ BASR_EX %r14,%r9 # branch to interrupt-handler
+ .Lpgm_return:
+ LOCKDEP_SYS_EXIT
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+@@ -962,7 +905,7 @@ ENTRY(psw_idle)
+ stpt __TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+ lpswe __SF_EMPTY(%r15)
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lpsw_idle_end:
+
+ /*
+@@ -1007,7 +950,7 @@ ENTRY(save_fpu_regs)
+ .Lsave_fpu_regs_done:
+ oi __LC_CPU_FLAGS+7,_CIF_FPU
+ .Lsave_fpu_regs_exit:
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lsave_fpu_regs_end:
+
+ /*
+@@ -1054,7 +997,7 @@ load_fpu_regs:
+ .Lload_fpu_regs_done:
+ ni __LC_CPU_FLAGS+7,255-_CIF_FPU
+ .Lload_fpu_regs_exit:
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lload_fpu_regs_end:
+
+ .L__critical_end:
+@@ -1227,7 +1170,7 @@ cleanup_critical:
+ jl 0f
+ clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
+ jl .Lcleanup_load_fpu_regs
+-0: BR_R11USE_R14
++0: BR_EX %r14
+
+ .align 8
+ .Lcleanup_table:
+@@ -1257,7 +1200,7 @@ cleanup_critical:
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+- BR_R11USE_R14
++ BR_EX %r14
+ #endif
+
+ .Lcleanup_system_call:
+@@ -1315,7 +1258,7 @@ cleanup_critical:
+ stg %r15,56(%r11) # r15 stack pointer
+ # set new psw address and exit
+ larl %r9,.Lsysc_do_svc
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_system_call_insn:
+ .quad system_call
+ .quad .Lsysc_stmg
+@@ -1325,7 +1268,7 @@ cleanup_critical:
+
+ .Lcleanup_sysc_tif:
+ larl %r9,.Lsysc_tif
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_sysc_restore:
+ # check if stpt has been executed
+@@ -1342,14 +1285,14 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_exit_timer
+ .quad .Lsysc_done - 4
+
+ .Lcleanup_io_tif:
+ larl %r9,.Lio_tif
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_io_restore:
+ # check if stpt has been executed
+@@ -1363,7 +1306,7 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_io_restore_insn:
+ .quad .Lio_exit_timer
+ .quad .Lio_done - 4
+@@ -1415,17 +1358,17 @@ cleanup_critical:
+ # prepare return psw
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_idle_insn:
+ .quad .Lpsw_idle_lpsw
+
+ .Lcleanup_save_fpu_regs:
+ larl %r9,save_fpu_regs
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_load_fpu_regs:
+ larl %r9,load_fpu_regs
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ /*
+ * Integer constants
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:33 +0200
+Subject: s390: move spectre sysfs attribute code
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-7-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit 4253b0e0627ee3461e64c2495c616f1c8f6b127b ]
+
+The nospec-branch.c file is compiled without the gcc options to
+generate expoline thunks. The return branches of the sysfs show
+functions cpu_show_spectre_v1 and cpu_show_spectre_v2 are indirect
+branches as well and need to be compiled with expolines.
+
+Move the sysfs functions for spectre reporting to a separate file
+and lose the '.' at the end of one of the messages.
+
+Cc: stable@vger.kernel.org # 4.16
+Fixes: d424986f1d ("s390: add sysfs attributes for spectre")
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/Makefile | 1 +
+ arch/s390/kernel/nospec-branch.c | 18 ------------------
+ arch/s390/kernel/nospec-sysfs.c | 21 +++++++++++++++++++++
+ 3 files changed, 22 insertions(+), 18 deletions(-)
+ create mode 100644 arch/s390/kernel/nospec-sysfs.c
+
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -49,6 +49,7 @@ obj-y += nospec-branch.o
+
+ extra-y += head.o head64.o vmlinux.lds
+
++obj-$(CONFIG_SYSFS) += nospec-sysfs.o
+ CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
+ obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
+--- a/arch/s390/kernel/nospec-branch.c
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -44,24 +44,6 @@ static int __init nospec_report(void)
+ }
+ arch_initcall(nospec_report);
+
+-#ifdef CONFIG_SYSFS
+-ssize_t cpu_show_spectre_v1(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+-}
+-
+-ssize_t cpu_show_spectre_v2(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+- return sprintf(buf, "Mitigation: execute trampolines\n");
+- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+- return sprintf(buf, "Mitigation: limited branch prediction.\n");
+- return sprintf(buf, "Vulnerable\n");
+-}
+-#endif
+-
+ #ifdef CONFIG_EXPOLINE
+
+ int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+--- /dev/null
++++ b/arch/s390/kernel/nospec-sysfs.c
+@@ -0,0 +1,21 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/device.h>
++#include <linux/cpu.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++ return sprintf(buf, "Mitigation: execute trampolines\n");
++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++ return sprintf(buf, "Mitigation: limited branch prediction\n");
++ return sprintf(buf, "Vulnerable\n");
++}
--- /dev/null
+From foo@baz Wed May 23 19:42:20 CEST 2018
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 23 May 2018 18:21:36 +0200
+Subject: s390: use expoline thunks in the BPF JIT
+To: stable@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Message-ID: <1527092496-24207-10-git-send-email-schwidefsky@de.ibm.com>
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+[ Upstream commit de5cb6eb514ebe241e3edeb290cb41deb380b81d ]
+
+The BPF JIT needs safeguarding against spectre v2 in the sk_load_xxx
+assembler stubs, and the indirect branches generated by the JIT itself
+need to be converted to expolines.
+
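+With expolines enabled, the epilogue emitted by the JIT on a z10 or
+newer machine (test_facility(35)) then ends in an inline thunk of
+roughly this shape (a sketch assembled from the comments in the diff
+below, with a label in place of the '.+10' displacement):
+
+	exrl	0,.Lbr		# execute the "br %r14" out of line
+	j	.		# trap speculative execution
+.Lbr:	br	%r14
+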
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/net/bpf_jit.S | 16 ++++++----
+ arch/s390/net/bpf_jit_comp.c | 63 +++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 71 insertions(+), 8 deletions(-)
+
+--- a/arch/s390/net/bpf_jit.S
++++ b/arch/s390/net/bpf_jit.S
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include "bpf_jit.h"
+
+ /*
+@@ -53,7 +54,7 @@ ENTRY(sk_load_##NAME##_pos); \
+ clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
+ jh sk_load_##NAME##_slow; \
+ LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
+- b OFF_OK(%r6); /* Return */ \
++ B_EX OFF_OK,%r6; /* Return */ \
+ \
+ sk_load_##NAME##_slow:; \
+ lgr %r2,%r7; /* Arg1 = skb pointer */ \
+@@ -63,11 +64,14 @@ sk_load_##NAME##_slow:; \
+ brasl %r14,skb_copy_bits; /* Get data from skb */ \
+ LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
+ ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
+- br %r6; /* Return */
++ BR_EX %r6; /* Return */
+
+ sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
+ sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
+
++ GEN_BR_THUNK %r6
++ GEN_B_THUNK OFF_OK,%r6
++
+ /*
+ * Load 1 byte from SKB (optimized version)
+ */
+@@ -79,7 +83,7 @@ ENTRY(sk_load_byte_pos)
+ clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
+ jnl sk_load_byte_slow
+ llgc %r14,0(%r3,%r12) # Get byte from skb
+- b OFF_OK(%r6) # Return OK
++ B_EX OFF_OK,%r6 # Return OK
+
+ sk_load_byte_slow:
+ lgr %r2,%r7 # Arg1 = skb pointer
+@@ -89,7 +93,7 @@ sk_load_byte_slow:
+ brasl %r14,skb_copy_bits # Get data from skb
+ llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
+ ltgr %r2,%r2 # Set cc to (%r2 != 0)
+- br %r6 # Return cc
++ BR_EX %r6 # Return cc
+
+ #define sk_negative_common(NAME, SIZE, LOAD) \
+ sk_load_##NAME##_slow_neg:; \
+@@ -103,7 +107,7 @@ sk_load_##NAME##_slow_neg:; \
+ jz bpf_error; \
+ LOAD %r14,0(%r2); /* Get data from pointer */ \
+ xr %r3,%r3; /* Set cc to zero */ \
+- br %r6; /* Return cc */
++ BR_EX %r6; /* Return cc */
+
+ sk_negative_common(word, 4, llgf)
+ sk_negative_common(half, 2, llgh)
+@@ -112,4 +116,4 @@ sk_negative_common(byte, 1, llgc)
+ bpf_error:
+ # force a return 0 from jit handler
+ ltgr %r15,%r15 # Set condition code
+- br %r6
++ BR_EX %r6
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -24,6 +24,8 @@
+ #include <linux/bpf.h>
+ #include <asm/cacheflush.h>
+ #include <asm/dis.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
+ #include "bpf_jit.h"
+
+ int bpf_jit_enable __read_mostly;
+@@ -41,6 +43,8 @@ struct bpf_jit {
+ int base_ip; /* Base address for literal pool */
+ int ret0_ip; /* Address of return 0 */
+ int exit_ip; /* Address of exit */
++ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
++ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
+ int tail_call_start; /* Tail call start offset */
+ int labels[1]; /* Labels for local jumps */
+ };
+@@ -248,6 +252,19 @@ static inline void reg_set_seen(struct b
+ REG_SET_SEEN(b2); \
+ })
+
++#define EMIT6_PCREL_RILB(op, b, target) \
++({ \
++ int rel = (target - jit->prg) / 2; \
++ _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
++ REG_SET_SEEN(b); \
++})
++
++#define EMIT6_PCREL_RIL(op, target) \
++({ \
++ int rel = (target - jit->prg) / 2; \
++ _EMIT6(op | rel >> 16, rel & 0xffff); \
++})
++
+ #define _EMIT6_IMM(op, imm) \
+ ({ \
+ unsigned int __imm = (imm); \
+@@ -475,8 +492,45 @@ static void bpf_jit_epilogue(struct bpf_
+ EMIT4(0xb9040000, REG_2, BPF_REG_0);
+ /* Restore registers */
+ save_restore_regs(jit, REGS_RESTORE);
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++ jit->r14_thunk_ip = jit->prg;
++ /* Generate __s390_indirect_jump_r14 thunk */
++ if (test_facility(35)) {
++ /* exrl %r0,.+10 */
++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++ } else {
++ /* larl %r1,.+14 */
++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++ /* ex 0,0(%r1) */
++ EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
++ }
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ }
+ /* br %r14 */
+ _EMIT2(0x07fe);
++
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
++ (jit->seen & SEEN_FUNC)) {
++ jit->r1_thunk_ip = jit->prg;
++ /* Generate __s390_indirect_jump_r1 thunk */
++ if (test_facility(35)) {
++ /* exrl %r0,.+10 */
++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ /* br %r1 */
++ _EMIT2(0x07f1);
++ } else {
++ /* larl %r1,.+14 */
++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++			/* ex 0,S390_lowcore.br_r1_trampoline */
++ EMIT4_DISP(0x44000000, REG_0, REG_0,
++ offsetof(struct _lowcore, br_r1_trampoline));
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ }
++ }
+ }
+
+ /*
+@@ -980,8 +1034,13 @@ static noinline int bpf_jit_insn(struct
+ /* lg %w1,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+ EMIT_CONST_U64(func));
+- /* basr %r14,%w1 */
+- EMIT2(0x0d00, REG_14, REG_W1);
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++ /* brasl %r14,__s390_indirect_jump_r1 */
++ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
++ } else {
++ /* basr %r14,%w1 */
++ EMIT2(0x0d00, REG_14, REG_W1);
++ }
+ /* lgr %b0,%r2: load return value into %b0 */
+ EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ if (bpf_helper_changes_skb_data((void *)func)) {
sock_diag-fix-use-after-free-read-in-__sk_free.patch
tcp-purge-write-queue-in-tcp_connect_init.patch
ext2-fix-a-block-leak.patch
+s390-add-assembler-macros-for-cpu-alternatives.patch
+s390-move-expoline-assembler-macros-to-a-header.patch
+s390-lib-use-expoline-for-indirect-branches.patch
+s390-ftrace-use-expoline-for-indirect-branches.patch
+s390-kernel-use-expoline-for-indirect-branches.patch
+s390-move-spectre-sysfs-attribute-code.patch
+s390-extend-expoline-to-bc-instructions.patch
+s390-use-expoline-thunks-in-the-bpf-jit.patch