1From foo@baz Wed May 23 19:42:20 CEST 2018
2From: Martin Schwidefsky <schwidefsky@de.ibm.com>
3Date: Wed, 23 May 2018 18:21:36 +0200
4Subject: s390: use expoline thunks in the BPF JIT
5To: stable@vger.kernel.org
6Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
7Message-ID: <1527092496-24207-10-git-send-email-schwidefsky@de.ibm.com>
8
9From: Martin Schwidefsky <schwidefsky@de.ibm.com>
10
11[ Upstream commit de5cb6eb514ebe241e3edeb290cb41deb380b81d ]
12
13The BPF JIT needs safeguarding against spectre v2 in the sk_load_xxx
14assembler stubs and the indirect branches generated by the JIT itself
15need to be converted to expolines.
16
17Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
18Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
19---
20 arch/s390/net/bpf_jit.S | 16 ++++++----
21 arch/s390/net/bpf_jit_comp.c | 63 +++++++++++++++++++++++++++++++++++++++++--
22 2 files changed, 71 insertions(+), 8 deletions(-)
23
24--- a/arch/s390/net/bpf_jit.S
25+++ b/arch/s390/net/bpf_jit.S
26@@ -8,6 +8,7 @@
27 */
28
29 #include <linux/linkage.h>
30+#include <asm/nospec-insn.h>
31 #include "bpf_jit.h"
32
33 /*
34@@ -53,7 +54,7 @@ ENTRY(sk_load_##NAME##_pos); \
35 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
36 jh sk_load_##NAME##_slow; \
37 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
38- b OFF_OK(%r6); /* Return */ \
39+ B_EX OFF_OK,%r6; /* Return */ \
40 \
41 sk_load_##NAME##_slow:; \
42 lgr %r2,%r7; /* Arg1 = skb pointer */ \
43@@ -63,11 +64,14 @@ sk_load_##NAME##_slow:; \
44 brasl %r14,skb_copy_bits; /* Get data from skb */ \
45 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
46 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
47- br %r6; /* Return */
48+ BR_EX %r6; /* Return */
49
50 sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
51 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
52
53+ GEN_BR_THUNK %r6
54+ GEN_B_THUNK OFF_OK,%r6
55+
56 /*
57 * Load 1 byte from SKB (optimized version)
58 */
59@@ -79,7 +83,7 @@ ENTRY(sk_load_byte_pos)
60 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
61 jnl sk_load_byte_slow
62 llgc %r14,0(%r3,%r12) # Get byte from skb
63- b OFF_OK(%r6) # Return OK
64+ B_EX OFF_OK,%r6 # Return OK
65
66 sk_load_byte_slow:
67 lgr %r2,%r7 # Arg1 = skb pointer
68@@ -89,7 +93,7 @@ sk_load_byte_slow:
69 brasl %r14,skb_copy_bits # Get data from skb
70 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
71 ltgr %r2,%r2 # Set cc to (%r2 != 0)
72- br %r6 # Return cc
73+ BR_EX %r6 # Return cc
74
75 #define sk_negative_common(NAME, SIZE, LOAD) \
76 sk_load_##NAME##_slow_neg:; \
77@@ -103,7 +107,7 @@ sk_load_##NAME##_slow_neg:; \
78 jz bpf_error; \
79 LOAD %r14,0(%r2); /* Get data from pointer */ \
80 xr %r3,%r3; /* Set cc to zero */ \
81- br %r6; /* Return cc */
82+ BR_EX %r6; /* Return cc */
83
84 sk_negative_common(word, 4, llgf)
85 sk_negative_common(half, 2, llgh)
86@@ -112,4 +116,4 @@ sk_negative_common(byte, 1, llgc)
87 bpf_error:
88 # force a return 0 from jit handler
89 ltgr %r15,%r15 # Set condition code
90- br %r6
91+ BR_EX %r6
92--- a/arch/s390/net/bpf_jit_comp.c
93+++ b/arch/s390/net/bpf_jit_comp.c
94@@ -24,6 +24,8 @@
95 #include <linux/bpf.h>
96 #include <asm/cacheflush.h>
97 #include <asm/dis.h>
98+#include <asm/facility.h>
99+#include <asm/nospec-branch.h>
100 #include "bpf_jit.h"
101
102 int bpf_jit_enable __read_mostly;
103@@ -41,6 +43,8 @@ struct bpf_jit {
104 int base_ip; /* Base address for literal pool */
105 int ret0_ip; /* Address of return 0 */
106 int exit_ip; /* Address of exit */
107+ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
108+ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
109 int tail_call_start; /* Tail call start offset */
110 int labels[1]; /* Labels for local jumps */
111 };
112@@ -248,6 +252,19 @@ static inline void reg_set_seen(struct b
113 REG_SET_SEEN(b2); \
114 })
115
116+#define EMIT6_PCREL_RILB(op, b, target) \
117+({ \
118+ int rel = (target - jit->prg) / 2; \
119+ _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
120+ REG_SET_SEEN(b); \
121+})
122+
123+#define EMIT6_PCREL_RIL(op, target) \
124+({ \
125+ int rel = (target - jit->prg) / 2; \
126+ _EMIT6(op | rel >> 16, rel & 0xffff); \
127+})
128+
129 #define _EMIT6_IMM(op, imm) \
130 ({ \
131 unsigned int __imm = (imm); \
132@@ -475,8 +492,45 @@ static void bpf_jit_epilogue(struct bpf_
133 EMIT4(0xb9040000, REG_2, BPF_REG_0);
134 /* Restore registers */
135 save_restore_regs(jit, REGS_RESTORE);
136+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
137+ jit->r14_thunk_ip = jit->prg;
138+ /* Generate __s390_indirect_jump_r14 thunk */
139+ if (test_facility(35)) {
140+ /* exrl %r0,.+10 */
141+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
142+ } else {
143+ /* larl %r1,.+14 */
144+ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
145+ /* ex 0,0(%r1) */
146+ EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
147+ }
148+ /* j . */
149+ EMIT4_PCREL(0xa7f40000, 0);
150+ }
151 /* br %r14 */
152 _EMIT2(0x07fe);
153+
154+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
155+ (jit->seen & SEEN_FUNC)) {
156+ jit->r1_thunk_ip = jit->prg;
157+ /* Generate __s390_indirect_jump_r1 thunk */
158+ if (test_facility(35)) {
159+ /* exrl %r0,.+10 */
160+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
161+ /* j . */
162+ EMIT4_PCREL(0xa7f40000, 0);
163+ /* br %r1 */
164+ _EMIT2(0x07f1);
165+ } else {
166+ /* larl %r1,.+14 */
167+ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
168+ /* ex 0,S390_lowcore.br_r1_tampoline */
169+ EMIT4_DISP(0x44000000, REG_0, REG_0,
170+ offsetof(struct _lowcore, br_r1_trampoline));
171+ /* j . */
172+ EMIT4_PCREL(0xa7f40000, 0);
173+ }
174+ }
175 }
176
177 /*
178@@ -980,8 +1034,13 @@ static noinline int bpf_jit_insn(struct
179 /* lg %w1,<d(imm)>(%l) */
180 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
181 EMIT_CONST_U64(func));
182- /* basr %r14,%w1 */
183- EMIT2(0x0d00, REG_14, REG_W1);
184+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
185+ /* brasl %r14,__s390_indirect_jump_r1 */
186+ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
187+ } else {
188+ /* basr %r14,%w1 */
189+ EMIT2(0x0d00, REG_14, REG_W1);
190+ }
191 /* lgr %b0,%r2: load return value into %b0 */
192 EMIT4(0xb9040000, BPF_REG_0, REG_2);
193 if (bpf_helper_changes_skb_data((void *)func)) {