// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
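
/*
 * Worked example (editor's illustrative note, not part of the original
 * source): the EMIT* helpers pack opcode bytes into a little-endian u32,
 * so the first argument lands at the lowest address. For instance,
 * 'mov rdi, rax' is REX.W + 89 /r:
 *
 *	EMIT3(0x48, 0x89, 0xC7);
 *
 * writes the byte sequence 48 89 C7 and advances 'prog' by 3. The
 * _off32 variants append a 4-byte immediate/displacement after the
 * opcode bytes in the same way.
 */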

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
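
/*
 * Example reading of the table above (editor's illustrative note, not in
 * the original source): BPF R1 maps to RDI (low 3 bits 7, REX bit clear),
 * while BPF R7 maps to R13 (low 3 bits 5, REX bit needed). reg2hex[]
 * only stores the low 3 bits of the register number; is_ereg() below
 * supplies the missing fourth bit via the REX prefix.
 */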

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
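
/*
 * Worked encoding example (editor's illustrative note, not in the
 * original source): for 'mov r13, rbx' (BPF R7 <- R6) the helpers
 * compose
 *
 *	add_2mod(0x48, BPF_REG_7, BPF_REG_6) = 0x49  (REX.W + REX.B)
 *	add_2reg(0xC0, BPF_REG_7, BPF_REG_6) = 0xDD  (ModRM: mod=11,
 *						      reg=rbx, rm=r13)
 *
 * so EMIT_mov(BPF_REG_7, BPF_REG_6) emits the bytes 49 89 DD.
 */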

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};
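
/*
 * Note (editor's illustrative note, not in the original source): the
 * first five entries are x86 'op r/m, reg' opcodes, so e.g. a 64-bit
 * BPF_ADD | BPF_X becomes 'REX.W 01 /r' (add rax, rdx is 48 01 D0).
 * The shift entries are not opcodes but ModRM bases for the
 * 0xC1/0xD1/0xD3 shift group, selecting /4 (shl), /5 (shr) and
 * /7 (sar) respectively.
 */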

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	11

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}
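
/*
 * Byte-accounting note (editor's illustrative note, not in the original
 * source): for a non-classic program the prologue above lays down nop5
 * (5 bytes), xor eax,eax or nop2 (2), push rbp (1) and mov rbp,rsp (3),
 * i.e. 5 + 2 + 1 + 3 = 11 bytes, which appears to be where
 * X86_TAIL_CALL_OFFSET comes from: a tail call jumps past the frame
 * setup and lands on the 'sub rsp, ...', so the target program keeps
 * the already-established rbp frame and only re-allocates its own
 * stack depth.
 */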

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
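
/*
 * Encoding note (editor's illustrative note, not in the original
 * source): both helpers emit a fixed 5-byte instruction, opcode plus
 * rel32 - 0xE8 = 'call rel32', 0xE9 = 'jmp rel32' - where rel32 is
 * relative to the end of the instruction (ip + X86_PATCH_SIZE). The
 * constant size is what makes these sites patchable by
 * bpf_arch_text_poke() below.
 */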

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

static int get_pop_bytes(bool *callee_regs_used)
{
	int bytes = 0;

	if (callee_regs_used[3])
		bytes += 2;
	if (callee_regs_used[2])
		bytes += 2;
	if (callee_regs_used[1])
		bytes += 2;
	if (callee_regs_used[0])
		bytes += 1;

	return bytes;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 42;
	int off2 = 31;
	int off3 = 9;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for each of the offsets that
	 * are used for bailing out of the tail call
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;
	off2 += pop_bytes;
	off3 += pop_bytes;

	if (stack_depth) {
		off1 += 7;
		off2 += 7;
		off3 += 7;
	}

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for the jump offset that is
	 * used for bailing out of the tail call when the limit is reached
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * total bytes for:
	 * - nop5 / jmpq $off
	 * - pop callee regs
	 * - sub rsp, $val if depth > 0
	 * - pop rax
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);                          /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might occur
			 * still on the read-write image until we finally
			 * locked it as read-only. Both modifications on
			 * the given image are under text_mutex to avoid
			 * interference.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
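
/*
 * Example (editor's illustrative note, not in the original source):
 * loading the constant 0xdeadbeef picks the 5-byte 'mov eax, 0xdeadbeef'
 * (plus a REX byte for r8..r15) since the value fits in an unsigned
 * 32-bit immediate, while 0x1122334455667788 needs the full 10-byte
 * 'movabs rax, imm64' (REX.W + B8+rd + 8-byte immediate), emitted in
 * two 4-byte halves, low dword first.
 */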

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}
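
/*
 * Example (editor's illustrative note, not in the original source):
 * addressing [rdi + 8] with value register rax yields ModRM 0x47
 * (mod=01 -> disp8, reg=rax, rm=rdi) followed by the byte 0x08; an
 * offset such as 0x1000 would instead use mod=10 (base 0x80) with a
 * 4-byte displacement.
 */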

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}
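
/*
 * Example (editor's illustrative note, not in the original source): a
 * 64-bit BPF_ADD | BPF_FETCH on [rdi + 0] with src rsi assembles to
 * 'lock xadd qword ptr [rdi], rsi' - F0 (lock), 48 (REX.W), 0F C1,
 * then the ModRM/displacement bytes from emit_insn_suffix().
 */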

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
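
/*
 * Layout note (editor's illustrative note, not in the original source):
 * do_jit() packs the fixup word as (pt_regs offset << 8) | emitted
 * length, so the handler above recovers the faulting load's destination
 * from the upper bits (zeroing it in pt_regs) and skips the load by
 * adding the low byte to regs->ip.
 */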

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
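
/*
 * Note on the macro above (editor's illustrative note, not in the
 * original source): addrs[i] - addrs[i - 1] is the size this BPF insn
 * had in the previous JIT pass, while prog - temp is the size just
 * emitted in the current pass; the difference is the number of padding
 * nops needed to keep later branch offsets stable when jmp_padding is
 * enabled.
 */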

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (image)
		memcpy(image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* test src_reg, src_reg */
				maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
				EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
				/* jne start_of_ldx */
				EMIT2(X86_JNE, 0);
				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JNE above */
				temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
				start_of_ldx = prog;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				u8 *branch_target;
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  dst_reg, AUX_REG, insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -(bpf_prog->aux->stack_depth + 8));
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image,
							  callee_regs_used,
							  bpf_prog->aux->stack_depth);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 *     is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}
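
/*
 * Mapping note (editor's illustrative note, not in the original
 * source): the x86-64 calling convention passes the first six integer
 * arguments in rdi, rsi, rdx, rcx, r8, r9, and BPF R1..R5 already
 * alias the first five per reg2hex[], so the two helpers above can
 * spill/reload arguments by index, with the sixth argument
 * special-cased through X86_REG_R9.
 */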
1735
7e639208 1736static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
ae240823 1737 struct bpf_prog *p, int stack_size, bool mod_ret)
7e639208
KS
1738{
1739 u8 *prog = *pprog;
ca06f55b 1740 u8 *jmp_insn;
7e639208 1741
ca06f55b
AS
1742 /* arg1: mov rdi, progs[i] */
1743 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
f2dd3b39
AS
1744 if (emit_call(&prog,
1745 p->aux->sleepable ? __bpf_prog_enter_sleepable :
1746 __bpf_prog_enter, prog))
1e6c62a8 1747 return -EINVAL;
f2dd3b39
AS
1748 /* remember prog start time returned by __bpf_prog_enter */
1749 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
7e639208 1750
ca06f55b
AS
1751 /* if (__bpf_prog_enter*(prog) == 0)
1752 * goto skip_exec_of_prog;
1753 */
1754 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
1755 /* emit 2 nops that will be replaced with JE insn */
1756 jmp_insn = prog;
1757 emit_nops(&prog, 2);
1758
7e639208
KS
1759 /* arg1: lea rdi, [rbp - stack_size] */
1760 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1761 /* arg2: progs[i]->insnsi for interpreter */
1762 if (!p->jited)
1763 emit_mov_imm64(&prog, BPF_REG_2,
1764 (long) p->insnsi >> 32,
1765 (u32) (long) p->insnsi);
1766 /* call JITed bpf program or interpreter */
1767 if (emit_call(&prog, p->bpf_func, prog))
1768 return -EINVAL;
1769
ae240823
KS
1770 /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1771 * of the previous call which is then passed on the stack to
1772 * the next BPF program.
1773 */
1774 if (mod_ret)
1775 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1776
ca06f55b
AS
1777 /* replace 2 nops with JE insn, since jmp target is known */
1778 jmp_insn[0] = X86_JE;
1779 jmp_insn[1] = prog - jmp_insn - 2;
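	/* the rel8 displacement just written is measured from the end of
	 * the 2-byte JE insn
	 */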
1780
f2dd3b39 1781 /* arg1: mov rdi, progs[i] */
1782 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1783 /* arg2: mov rsi, rbx <- start time in nsec */
1784 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1785 if (emit_call(&prog,
1786 p->aux->sleepable ? __bpf_prog_exit_sleepable :
1787 __bpf_prog_exit, prog))
1e6c62a8 1788 return -EINVAL;
7e639208 1789
1790 *pprog = prog;
1791 return 0;
1792}
1793
7e639208 1794static void emit_align(u8 **pprog, u32 align)
1795{
1796 u8 *target, *prog = *pprog;
1797
1798 target = PTR_ALIGN(prog, align);
1799 if (target != prog)
1800 emit_nops(&prog, target - prog);
1801
1802 *pprog = prog;
1803}
1804
1805static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1806{
1807 u8 *prog = *pprog;
7e639208 1808 s64 offset;
1809
1810 offset = func - (ip + 2 + 4);
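	/* ip + 2 + 4 skips the two opcode bytes and the rel32 immediate;
	 * the displacement is relative to the end of the jump insn.
	 */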
1811 if (!is_simm32(offset)) {
1812 pr_err("Target %p is out of range\n", func);
1813 return -EINVAL;
1814 }
1815 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1816 *pprog = prog;
1817 return 0;
1818}
1819
85d33df3 1820static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
88fd9e53 1821 struct bpf_tramp_progs *tp, int stack_size)
fec56f58 1822{
7e639208 1823 int i;
fec56f58 1824 u8 *prog = *pprog;
fec56f58 1825
88fd9e53 1826 for (i = 0; i < tp->nr_progs; i++) {
ae240823 1827 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1828 return -EINVAL;
1829 }
1830 *pprog = prog;
1831 return 0;
1832}
1833
1834static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1835 struct bpf_tramp_progs *tp, int stack_size,
1836 u8 **branches)
1837{
1838 u8 *prog = *pprog;
ced50fc4 1839 int i;
ae240823 1840
1841 /* The first fmod_ret program will receive a garbage return value.
1842 * Set this to 0 to avoid confusing the program.
1843 */
1844 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1845 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
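	/* [rbp - 8] is the same slot where BPF_TRAMP_F_CALL_ORIG saves the
	 * original return value, so fmod_ret progs and do_fexit share it.
	 */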
1846 for (i = 0; i < tp->nr_progs; i++) {
1847 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
fec56f58 1848 return -EINVAL;
ae240823 1849
13fac1d8 1850 /* The mod_ret prog stored its return value at [rbp - 8]. Emit:
1851 * if (*(u64 *)(rbp - 8) != 0)
ae240823 1852 * goto do_fexit;
ae240823 1853 */
13fac1d8 1854 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1855 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
ae240823 1856
1857 /* Save the location of the branch, and generate 6 nops
1858 * (4 bytes for the offset and 2 bytes for the jump opcode). These nops
1859 * are replaced with a conditional jump once do_fexit (i.e. the
1860 * start of the fexit invocation) is finalized.
1861 */
1862 branches[i] = prog;
1863 emit_nops(&prog, 4 + 2);
fec56f58 1864 }
ae240823 1865
fec56f58 1866 *pprog = prog;
1867 return 0;
1868}
1869
1870/* Example:
1871 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1872 * its 'struct btf_func_model' will be nr_args=2
1873 * The assembly code when eth_type_trans is executing after trampoline:
1874 *
1875 * push rbp
1876 * mov rbp, rsp
1877 * sub rsp, 16 // space for skb and dev
1878 * push rbx // temp regs to pass start time
1879 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1880 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1881 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1882 * mov rbx, rax // remember start time if bpf stats are enabled
1883 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1884 * call addr_of_jited_FENTRY_prog
1885 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1886 * mov rsi, rbx // prog start time
1887 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1888 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1889 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1890 * pop rbx
1891 * leave
1892 * ret
1893 *
1894 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1895 * replaced with 'call generated_bpf_trampoline'. When it returns,
1896 * eth_type_trans will continue executing with the original skb and dev pointers.
1897 *
1898 * The assembly code when eth_type_trans is called from trampoline:
1899 *
1900 * push rbp
1901 * mov rbp, rsp
1902 * sub rsp, 24 // space for skb, dev, return value
1903 * push rbx // temp regs to pass start time
1904 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1905 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1906 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1907 * mov rbx, rax // remember start time if bpf stats are enabled
1908 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1909 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1910 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1911 * mov rsi, rbx // prog start time
1912 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1913 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
1914 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
1915 * call eth_type_trans+5 // execute body of eth_type_trans
1916 * mov qword ptr [rbp - 8], rax // save return value
1917 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1918 * mov rbx, rax // remember start time if bpf stats are enabled
1919 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1920 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1921 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1922 * mov rsi, rbx // prog start time
1923 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1924 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
1925 * pop rbx
1926 * leave
1927 * add rsp, 8 // skip eth_type_trans's frame
1928 * ret // return to its caller
1929 */
e21aa341 1930int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
85d33df3 1931 const struct btf_func_model *m, u32 flags,
88fd9e53 1932 struct bpf_tramp_progs *tprogs,
fec56f58 1933 void *orig_call)
1934{
ced50fc4 1935 int ret, i, nr_args = m->nr_args;
fec56f58 1936 int stack_size = nr_args * 8;
88fd9e53 1937 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1938 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
ae240823 1939 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1940 u8 **branches = NULL;
fec56f58 1941 u8 *prog;
1942
1943 /* the x86-64 calling convention passes up to 6 arguments in registers. 7+ can be added in the future */
1944 if (nr_args > 6)
1945 return -ENOTSUPP;
1946
1947 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1948 (flags & BPF_TRAMP_F_SKIP_FRAME))
1949 return -EINVAL;
1950
1951 if (flags & BPF_TRAMP_F_CALL_ORIG)
1952 stack_size += 8; /* room for return value of orig_call */
1953
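	/* Resulting frame layout below rbp (a sketch, CALL_ORIG case):
	 *   [rbp - 8]                          return value of orig_call
	 *   [rbp - stack_size] .. [rbp - 16]   saved arguments
	 */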
1954 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1955 /* skip the patched call instruction and point orig_call to the
1956 * actual body of the kernel function.
1957 */
4b3da77b 1958 orig_call += X86_PATCH_SIZE;
fec56f58 1959
1960 prog = image;
1961
1962 EMIT1(0x55); /* push rbp */
1963 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1964 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
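	/* nr_args <= 6, so stack_size <= 56 and the imm8 form of SUB suffices */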
1965 EMIT1(0x53); /* push rbx */
1966
1967 save_regs(m, &prog, nr_args, stack_size);
1968
e21aa341 1969 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1970 /* arg1: mov rdi, im */
1971 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1972 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
1973 ret = -EINVAL;
1974 goto cleanup;
1975 }
1976 }
1977
88fd9e53 1978 if (fentry->nr_progs)
1979 if (invoke_bpf(m, &prog, fentry, stack_size))
fec56f58 1980 return -EINVAL;
1981
ae240823 1982 if (fmod_ret->nr_progs) {
1983 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
1984 GFP_KERNEL);
1985 if (!branches)
1986 return -ENOMEM;
1987
1988 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
1989 branches)) {
1990 ret = -EINVAL;
1991 goto cleanup;
1992 }
1993 }
1994
fec56f58 1995 if (flags & BPF_TRAMP_F_CALL_ORIG) {
e21aa341 1996 restore_regs(m, &prog, nr_args, stack_size);
fec56f58 1997
1998 /* call original function */
ae240823 1999 if (emit_call(&prog, orig_call, prog)) {
2000 ret = -EINVAL;
2001 goto cleanup;
2002 }
fec56f58 2003 /* remember the return value on the stack for the bpf prog to access */
2004 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
e21aa341 2005 im->ip_after_call = prog;
b1f480bc 2006 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
b9082970 2007 prog += X86_PATCH_SIZE;
fec56f58 2008 }
2009
ae240823 2010 if (fmod_ret->nr_progs) {
2011 /* From Intel 64 and IA-32 Architectures Optimization
2012 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2013 * Coding Rule 11: All branch targets should be 16-byte
2014 * aligned.
2015 */
2016 emit_align(&prog, 16);
2017 /* Update the branches saved in invoke_bpf_mod_ret with the
2018 * aligned address of do_fexit.
2019 */
2020 for (i = 0; i < fmod_ret->nr_progs; i++)
2021 emit_cond_near_jump(&branches[i], prog, branches[i],
2022 X86_JNE);
2023 }
2024
88fd9e53 2025 if (fexit->nr_progs)
ae240823 2026 if (invoke_bpf(m, &prog, fexit, stack_size)) {
2027 ret = -EINVAL;
2028 goto cleanup;
2029 }
fec56f58 2030
2031 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2032 restore_regs(m, &prog, nr_args, stack_size);
2033
ae240823 2034 /* This needs to be done regardless. If there were fmod_ret programs,
2035 * the return value is only updated on the stack and still needs to be
2036 * restored to R0.
2037 */
e21aa341 2038 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2039 im->ip_epilogue = prog;
2040 /* arg1: mov rdi, im */
2041 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2042 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2043 ret = -EINVAL;
2044 goto cleanup;
2045 }
fec56f58 2046 /* restore the original return value into RAX */
2047 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
e21aa341 2048 }
fec56f58 2049
2050 EMIT1(0x5B); /* pop rbx */
2051 EMIT1(0xC9); /* leave */
2052 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2053 /* skip our return address and return to parent */
2054 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2055 EMIT1(0xC3); /* ret */
85d33df3 2056 /* Make sure the trampoline generation logic doesn't overflow */
ae240823 2057 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2058 ret = -EFAULT;
2059 goto cleanup;
2060 }
2061 ret = prog - (u8 *)image;
2062
2063cleanup:
2064 kfree(branches);
2065 return ret;
fec56f58 2066}
2067
75ccbef6 2068static int emit_fallback_jump(u8 **pprog)
2069{
2070 u8 *prog = *pprog;
2071 int err = 0;
2072
2073#ifdef CONFIG_RETPOLINE
2074 /* Note that this assumes that the compiler uses external
2075 * thunks for indirect calls. Both clang and GCC use the same
2076 * naming convention for external thunks.
2077 */
2078 err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
2079#else
75ccbef6 2080 EMIT2(0xFF, 0xE2); /* jmp rdx */
2081#endif
2082 *pprog = prog;
2083 return err;
2084}
2085
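/* Emit a binary search over the sorted prog addresses held in rdx.
 * For a leaf index the emitted code looks roughly like (a sketch):
 *
 *	cmp rdx, progs[a]
 *	je progs[a]
 *	jmp fallback	// retpoline thunk or plain "jmp rdx"
 */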
2086static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2087{
7e639208 2088 u8 *jg_reloc, *prog = *pprog;
ced50fc4 2089 int pivot, err, jg_bytes = 1;
75ccbef6 2090 s64 jg_offset;
2091
2092 if (a == b) {
2093 /* Leaf node of recursion, i.e. not a range of indices
2094 * anymore.
2095 */
2096 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2097 if (!is_simm32(progs[a]))
2098 return -1;
2099 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2100 progs[a]);
2101 err = emit_cond_near_jump(&prog, /* je func */
2102 (void *)progs[a], prog,
2103 X86_JE);
2104 if (err)
2105 return err;
2106
2107 err = emit_fallback_jump(&prog); /* jmp thunk/indirect */
2108 if (err)
2109 return err;
2110
2111 *pprog = prog;
2112 return 0;
2113 }
2114
2115 /* Not a leaf node, so we pivot, and recursively descend into
2116 * the lower and upper ranges.
2117 */
2118 pivot = (b - a) / 2;
2119 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2120 if (!is_simm32(progs[a + pivot]))
2121 return -1;
2122 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2123
2124 if (pivot > 2) { /* jg upper_part */
2125 /* Require near jump. */
2126 jg_bytes = 4;
2127 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2128 } else {
2129 EMIT2(X86_JG, 0);
2130 }
2131 jg_reloc = prog;
2132
2133 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2134 progs);
2135 if (err)
2136 return err;
2137
116eb788 2138 /* From Intel 64 and IA-32 Architectures Optimization
2139 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2140 * Coding Rule 11: All branch targets should be 16-byte
2141 * aligned.
2142 */
7e639208 2143 emit_align(&prog, 16);
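	/* Back-patch the JG emitted above: its displacement is measured
	 * from jg_reloc, i.e. from the end of the jump insn.
	 */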
75ccbef6 2144 jg_offset = prog - jg_reloc;
2145 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2146
2147 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2148 b, progs);
2149 if (err)
2150 return err;
2151
2152 *pprog = prog;
2153 return 0;
2154}
2155
2156static int cmp_ips(const void *a, const void *b)
2157{
2158 const s64 *ipa = a;
2159 const s64 *ipb = b;
2160
2161 if (*ipa > *ipb)
2162 return 1;
2163 if (*ipa < *ipb)
2164 return -1;
2165 return 0;
2166}
2167
2168int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2169{
2170 u8 *prog = image;
2171
2172 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2173 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2174}
2175
1c2a088a 2176struct x64_jit_data {
2177 struct bpf_binary_header *header;
2178 int *addrs;
2179 u8 *image;
2180 int proglen;
2181 struct jit_context ctx;
2182};
2183
93c5aecc 2184#define MAX_PASSES 20
2185#define PADDING_PASSES (MAX_PASSES - 5)
2186
d1c55ab5 2187struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
f3c2af7b 2188{
2189 struct bpf_binary_header *header = NULL;
959a7579 2190 struct bpf_prog *tmp, *orig_prog = prog;
1c2a088a 2191 struct x64_jit_data *jit_data;
f3c2af7b 2192 int proglen, oldproglen = 0;
2193 struct jit_context ctx = {};
959a7579 2194 bool tmp_blinded = false;
1c2a088a 2195 bool extra_pass = false;
93c5aecc 2196 bool padding = false;
f3c2af7b 2197 u8 *image = NULL;
2198 int *addrs;
2199 int pass;
2200 int i;
2201
60b58afc 2202 if (!prog->jit_requested)
959a7579 2203 return orig_prog;
2204
2205 tmp = bpf_jit_blind_constants(prog);
a2c7a983 2206 /*
2207 * If blinding was requested and we failed during blinding,
959a7579 2208 * we must fall back to the interpreter.
2209 */
2210 if (IS_ERR(tmp))
2211 return orig_prog;
2212 if (tmp != prog) {
2213 tmp_blinded = true;
2214 prog = tmp;
2215 }
0a14842f 2216
1c2a088a 2217 jit_data = prog->aux->jit_data;
2218 if (!jit_data) {
2219 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2220 if (!jit_data) {
2221 prog = orig_prog;
2222 goto out;
2223 }
2224 prog->aux->jit_data = jit_data;
2225 }
2226 addrs = jit_data->addrs;
2227 if (addrs) {
2228 ctx = jit_data->ctx;
2229 oldproglen = jit_data->proglen;
2230 image = jit_data->image;
2231 header = jit_data->header;
2232 extra_pass = true;
93c5aecc 2233 padding = true;
1c2a088a 2234 goto skip_init_addrs;
2235 }
de920fc6 2236 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
959a7579 2237 if (!addrs) {
2238 prog = orig_prog;
1c2a088a 2239 goto out_addrs;
959a7579 2240 }
f3c2af7b 2241
a2c7a983 2242 /*
2243 * Before the first pass, make a rough estimate of addrs[]:
2244 * each BPF instruction is translated to fewer than 64 bytes.
f3c2af7b 2245 */
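	/* addrs[] was allocated with prog->len + 1 entries, so there is
	 * also an offset slot just past the last insn.
	 */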
7c2e988f 2246 for (proglen = 0, i = 0; i <= prog->len; i++) {
f3c2af7b 2247 proglen += 64;
2248 addrs[i] = proglen;
2249 }
2250 ctx.cleanup_addr = proglen;
1c2a088a 2251skip_init_addrs:
f3c2af7b 2252
a2c7a983 2253 /*
2254 * JITed image shrinks with every pass and the loop iterates
2255 * until the image stops shrinking. Very large BPF programs
3f7352bf 2256 * may converge on the last pass. In such a case, do one more
a2c7a983 2257 * pass to emit the final image.
3f7352bf 2258 */
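	/* From PADDING_PASSES onward, do_jit() pads shrinkable jump insns
	 * with NOPs so that addresses stop moving and the image converges.
	 */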
93c5aecc 2259 for (pass = 0; pass < MAX_PASSES || image; pass++) {
2260 if (!padding && pass >= PADDING_PASSES)
2261 padding = true;
2262 proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
f3c2af7b 2263 if (proglen <= 0) {
3aab8884 2264out_image:
f3c2af7b 2265 image = NULL;
2266 if (header)
738cbe72 2267 bpf_jit_binary_free(header);
959a7579 2268 prog = orig_prog;
2269 goto out_addrs;
f3c2af7b 2270 }
0a14842f 2271 if (image) {
e0ee9c12 2272 if (proglen != oldproglen) {
f3c2af7b 2273 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2274 proglen, oldproglen);
3aab8884 2275 goto out_image;
e0ee9c12 2276 }
0a14842f 2277 break;
2278 }
2279 if (proglen == oldproglen) {
3dec541b 2280 /*
2281 * The number of entries in extable is the number of BPF_LDX
2282 * insns that access kernel memory via "pointer to BTF type".
2283 * The verifier changed their opcode from LDX|MEM|size
2284 * to LDX|PROBE_MEM|size to make JITing easier.
2285 */
2286 u32 align = __alignof__(struct exception_table_entry);
2287 u32 extable_size = prog->aux->num_exentries *
2288 sizeof(struct exception_table_entry);
2289
2290 /* allocate module memory for x86 insns and extable */
2291 header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2292 &image, align, jit_fill_hole);
959a7579 2293 if (!header) {
2294 prog = orig_prog;
2295 goto out_addrs;
2296 }
3dec541b 2297 prog->aux->extable = (void *) image + roundup(proglen, align);
0a14842f 2298 }
2299 oldproglen = proglen;
6007b080 2300 cond_resched();
0a14842f 2301 }
79617801 2302
0a14842f 2303 if (bpf_jit_enable > 1)
485d6511 2304 bpf_jit_dump(prog->len, proglen, pass + 1, image);
0a14842f 2305
2306 if (image) {
1c2a088a 2307 if (!prog->is_func || extra_pass) {
428d5df1 2308 bpf_tail_call_direct_fixup(prog);
1c2a088a 2309 bpf_jit_binary_lock_ro(header);
2310 } else {
2311 jit_data->addrs = addrs;
2312 jit_data->ctx = ctx;
2313 jit_data->proglen = proglen;
2314 jit_data->image = image;
2315 jit_data->header = header;
2316 }
f3c2af7b 2317 prog->bpf_func = (void *)image;
a91263d5 2318 prog->jited = 1;
783d28dd 2319 prog->jited_len = proglen;
9d5ecb09 2320 } else {
2321 prog = orig_prog;
0a14842f 2322 }
959a7579 2323
39f56ca9 2324 if (!image || !prog->is_func || extra_pass) {
c454a46b 2325 if (image)
7c2e988f 2326 bpf_prog_fill_jited_linfo(prog, addrs + 1);
959a7579 2327out_addrs:
de920fc6 2328 kvfree(addrs);
1c2a088a 2329 kfree(jit_data);
2330 prog->aux->jit_data = NULL;
2331 }
959a7579 2332out:
2333 if (tmp_blinded)
2334 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2335 tmp : orig_prog);
d1c55ab5 2336 return prog;
0a14842f 2337}
e6ac2450 2338
2339bool bpf_jit_supports_kfunc_call(void)
2340{
2341 return true;
2342}