// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
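
/*
 * Illustrative note: the EMIT* macros pack instruction bytes little-endian
 * into a u32 before emit_code() copies them out, so the first argument is
 * the first byte in the instruction stream. For example,
 * EMIT3(0x48, 0x89, 0xE5) appends the bytes 48 89 E5, i.e. 'mov rbp, rsp'.
 */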

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}
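
/*
 * Illustrative note: is_simm32() checks that sign-extending the low 32 bits
 * reproduces the value, so 0xffffffff80000000 passes (it is the s32
 * -2147483648 sign-extended) while 0x0000000080000000 does not.
 */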

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
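
/*
 * Illustrative note: add_2reg() builds a ModRM byte with 'dst_reg' in the
 * r/m field (bits 0-2) and 'src_reg' in the reg field (bits 3-5). E.g. for
 * a 64-bit register move of BPF_REG_1 into BPF_REG_0, EMIT_mov() emits
 * 48 89 F8, i.e. 'mov rax, rdi': neither register is extended, so
 * add_2mod() leaves the REX prefix at 0x48, and the ModRM byte is
 * 0xC0 + reg2hex[BPF_REG_0] + (reg2hex[BPF_REG_1] << 3) = 0xF8.
 */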

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5

#define PROLOGUE_SIZE		25

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		EMIT2(0x6a, 0x00);
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}
	*pprog = prog;
}
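
/*
 * Illustrative note: the 5 nop bytes at the start of the prologue form the
 * X86_PATCH_SIZE patch site; bpf_arch_text_poke() can later overwrite them
 * with a 5-byte 'call rel32' or 'jmp rel32', e.g. to attach a BPF
 * trampoline to this program.
 */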

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
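
/*
 * Illustrative note: x86 near call/jmp displacements are relative to the
 * end of the 5-byte instruction, hence 'func - (ip + X86_PATCH_SIZE)'
 * above. A call at 'ip' targeting 'ip + 5' would encode as E8 00 00 00 00.
 */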

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
	}
	ret = 0;
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
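
/*
 * Illustrative note: __bpf_arch_text_poke() acts like a compare-and-swap on
 * the patch site - the new instruction is only installed if the bytes at
 * 'ip' still match what the caller expects (the old target, or nops when
 * old_addr is NULL); otherwise it returns -EBUSY.
 */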

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Wow we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, 14);                            /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

	poke->ip = image + (addr - X86_PATCH_SIZE);
	poke->adj_off = PROLOGUE_SIZE;

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->ip_stable), any parallel
			 * bpf_arch_text_poke() might occur still on the
			 * read-write image until we finally locked it as
			 * read-only. Both modifications on the given image
			 * are under text_mutex to avoid interference.
			 */
			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->ip_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
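
/*
 * Illustrative note: the size win above is substantial - 'movabsq' with a
 * full imm64 is 10 bytes (REX.W + B8 + 8 immediate bytes), while
 * 'mov eax, imm32' is 5 bytes and 'xor eax, eax' only 2.
 */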

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	/*
	 * If insn->off == 0 we can save one extra byte, but
	 * special case of x86 R13 which always needs an offset
	 * is not worth the hassle
	 */
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
	*pprog = prog;
}
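
/*
 * Illustrative note: emit_ldx(&prog, BPF_W, BPF_REG_0, BPF_REG_1, 0x14)
 * produces 8B 47 14, i.e. 'mov eax, dword ptr [rdi + 0x14]'. No REX prefix
 * is needed since neither RAX nor RDI is an extended register, and the
 * 8-bit displacement form (ModRM mod = 01) is chosen because 0x14 fits
 * in imm8.
 */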

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
	*pprog = prog;
}

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
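
/*
 * Illustrative note: the fixup word packs two fields - bits 0-7 hold the
 * length of the faulting load (added to regs->ip to skip it) and bits 8+
 * hold the pt_regs offset of the destination register to clear. E.g. the
 * 4-byte 'mov rbx, qword ptr [rax + 0x14]' with dst_reg == BPF_REG_6 gets
 * fixup = 4 | (offsetof(struct pt_regs, bx) << 8).
 */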

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
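
		/*
		 * Illustrative note: x86 div implicitly uses rdx:rax as the
		 * dividend and clobbers both registers, which is why rax and
		 * rdx are saved around the operation above and the result is
		 * staged in r11 before being moved into dst_reg.
		 */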

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
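
		/*
		 * Illustrative note: x86-64 is little-endian, so BPF_FROM_LE
		 * only has to zero-extend to the target width, and the
		 * 64-bit case is a pure nop.
		 */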

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
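
		/*
		 * Illustrative note: BPF_XADD maps to 'lock add'. E.g. the
		 * 32-bit form with dst_reg == BPF_REG_1, src_reg == BPF_REG_0
		 * and off == 0x14 becomes F0 01 47 14, i.e.
		 * 'lock add dword ptr [rdi + 0x14], eax'.
		 */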

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
				return -EINVAL;
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image);
			else
				emit_bpf_tail_call_indirect(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				if (BPF_CLASS(insn->code) == BPF_JMP)
					EMIT1(add_2mod(0x48, dst_reg, dst_reg));
				else if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;
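
		/*
		 * Illustrative note: the short (rel8) and near (rel32) forms
		 * of a conditional jump are related by opcode arithmetic,
		 * e.g. 'je' is 74 rel8 or 0F 84 rel32 - hence the
		 * 'jmp_cond + 0x10' with the 0x0F prefix above.
		 */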

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F); /* pop r15 */
			EMIT2(0x41, 0x5E); /* pop r14 */
			EMIT2(0x41, 0x5D); /* pop r13 */
			EMIT1(0x5B);       /* pop rbx */
			EMIT1(0xC9);       /* leave */
			EMIT1(0xC3);       /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_prog *p, int stack_size, bool mod_ret)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (emit_call(&prog, __bpf_prog_enter, prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 */
	if (mod_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
		       (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	if (emit_call(&prog, __bpf_prog_exit, prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_nops(u8 **pprog, unsigned int len)
{
	unsigned int i, noplen;
	u8 *prog = *pprog;
	int cnt = 0;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(ideal_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}
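
/*
 * Illustrative note: the offset is 'func - (ip + 2 + 4)' because the
 * emitted near conditional jump is 6 bytes (two opcode bytes plus a 32-bit
 * displacement), and x86 displacements are relative to the next
 * instruction.
 */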

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_progs *tp, int stack_size)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_progs *tp, int stack_size,
			      u8 **branches)
{
	u8 *prog = *pprog;
	int i, cnt = 0;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These
		 * nops are replaced with a conditional jump once do_fexit
		 * (i.e. the start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will have nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16 // space for skb and dev
 * push rbx // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
 * mov rbx, rax // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
 * mov rsi, rbx // prog start time
 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns,
 * eth_type_trans will continue executing with the original skb and dev
 * pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24 // space for skb, dev, return value
 * push rbx // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
 * mov rbx, rax // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
 * mov rsi, rbx // prog start time
 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
 * call eth_type_trans+5 // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax // save return value
 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
 * mov rbx, rax // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
 * mov rsi, rbx // prog start time
 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8 // skip eth_type_trans's frame
 * ret // return to its caller
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, cnt = 0, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		stack_size += 8; /* room for return value of orig_call */

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip the patched call instruction and point orig_call to the
		 * actual body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, stack_size))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		if (fentry->nr_progs || fmod_ret->nr_progs)
			restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value on the stack for the bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, stack_size)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG)
		/* restore original return value back into RAX */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}
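
/*
 * Resulting trampoline stack layout (illustration for nr_args == 2 with
 * BPF_TRAMP_F_CALL_ORIG, i.e. stack_size == 24, matching the
 * eth_type_trans example above):
 *
 *	[rbp -  8]	return value of orig_call / fmod_ret verdict
 *	[rbp - 16]	arg2 (saved rsi)
 *	[rbp - 24]	arg1 (saved rdi); R1 (ctx) points here
 */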

static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/* Note that this assumes that the compiler uses external
	 * thunks for indirect calls. Both clang and GCC use the same
	 * naming convention for external thunks.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	int cnt = 0;

	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1, cnt = 0;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {	/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
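
/*
 * Illustrative shape of the emitted dispatch tree for four sorted
 * program addresses f0 < f1 < f2 < f3 (simplified: the 16-byte
 * alignment padding is omitted; L_* labels are illustrative; rdx holds
 * the target address):
 *
 *	cmp rdx, f1
 *	jg  L_upper
 *	cmp rdx, f0
 *	jg  L_1
 *	cmp rdx, f0
 *	je  f0
 *	jmp fallback		// jmp rdx, possibly via retpoline thunk
 * L_1:	cmp rdx, f1
 *	je  f1
 *	jmp fallback
 * L_upper:
 *	cmp rdx, f2
 *	jg  L_3
 *	cmp rdx, f2
 *	je  f2
 *	jmp fallback
 * L_3:	cmp rdx, f3
 *	je  f3
 *	jmp fallback
 */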

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}
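
/*
 * Note: sorting funcs[] first is what makes the signed cmp/jg binary
 * search in emit_bpf_dispatcher() valid. The generated image is entered
 * with the target address in rdx (BPF_REG_3), i.e. the third argument
 * register of the x86-64 calling convention, which is expected to hold
 * the bpf_func pointer at the dispatcher call site.
 */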

/* JIT state carried in prog->aux->jit_data between the initial compile
 * and the extra pass that fixes up call addresses of subprograms.
 */
struct x64_jit_data {
	struct bpf_binary_header *header;	/* executable image allocation */
	int *addrs;				/* insn index -> offset map */
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge only on the last pass. In such a case, do one more
	 * pass to emit the final image.
	 */
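
	/*
	 * Worked example of the convergence (illustrative): with the
	 * initial 64-bytes-per-insn estimate, a forward jump can look
	 * farther than 127 bytes away and is emitted in its 6-byte near
	 * form. As addrs[] tightens on later passes, the same target may
	 * come within imm8 range and the jump shrinks to 2 bytes, which
	 * can in turn pull further targets into range. Once
	 * proglen == oldproglen, image memory is allocated and one final
	 * pass writes the instructions into it.
	 */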
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}