// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}
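
/*
 * The EMIT*() helpers below pack up to four opcode bytes into one u32,
 * least-significant byte first; emit_code() then stores that u32 with a
 * plain little-endian write, so (b1) + ((b2) << 8) lands in memory as
 * b1 followed by b2. For example, EMIT2(0x41, 0x55) appends the two
 * bytes 0x41 0x55 ("push r13") to the image.
 */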

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)							\
	do {									\
		if (DST != SRC)							\
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8 */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}
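
/*
 * In add_1mod()/add_2mod() above, bit 0 of the prefix byte is REX.B
 * (extends the ModRM r/m field) and bit 2 is REX.R (extends the ModRM
 * reg field). Callers start from 0x48 (REX.W, 64-bit operand size) or
 * 0x40 (plain REX) and let these helpers set B/R for r8..r15 operands.
 */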

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
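
/*
 * A ModRM byte is mod(2):reg(3):rm(3); 0xC0 selects register-direct
 * mode, with dst_reg in rm and src_reg in reg. As a worked example,
 * EMIT_mov(BPF_REG_1, BPF_REG_2) emits 48 89 F7, i.e. "mov rdi, rsi":
 * REX.W, opcode 0x89, and ModRM 0xC0 + 7 (rdi) + (6 (rsi) << 3) = 0xF7.
 */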

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};
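
/*
 * Note the two kinds of entries above: ADD..XOR are full one-byte
 * "op r/m, reg" opcodes, while the LSH/RSH/ARSH values are ModRM bytes
 * (0xC0 plus opcode extension /4, /5, /7 in the reg field) that get
 * paired with the 0xC1/0xD1/0xD3 shift opcodes at the use sites.
 */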

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	11
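
/*
 * The 11 bytes skipped on a tail call are exactly what emit_prologue()
 * puts in front of the stack setup: the 5-byte patchable nop, the
 * 2-byte xor eax,eax/nop2 slot, push rbp (1 byte) and mov rbp,rsp
 * (3 bytes). The tail-call target is therefore entered right at its
 * "sub rsp", reusing the rbp frame already set up by the tail caller.
 */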

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
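
/*
 * emit_patch() produces the 5-byte rel32 forms of call (0xE8) and
 * jmp (0xE9); the displacement is computed from the end of the 5-byte
 * instruction, hence the "ip + X86_PATCH_SIZE" above.
 */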

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

static int get_pop_bytes(bool *callee_regs_used)
{
	int bytes = 0;

	if (callee_regs_used[3])
		bytes += 2;
	if (callee_regs_used[2])
		bytes += 2;
	if (callee_regs_used[1])
		bytes += 2;
	if (callee_regs_used[0])
		bytes += 1;

	return bytes;
}
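
/*
 * These byte counts mirror pop_callee_regs(): the r13-r15 pops carry a
 * REX prefix (2 bytes each) while pop rbx is a single byte.
 */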

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 42;
	int off2 = 31;
	int off3 = 9;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for each of the offsets that
	 * are used for bailing out of the tail call
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;
	off2 += pop_bytes;
	off3 += pop_bytes;

	if (stack_depth) {
		off1 += 7;
		off2 += 7;
		off3 += 7;
	}

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for the jump offset that is used
	 * for bailing out of the tail call when the limit is reached
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * total bytes for:
	 * - nop5/ jmpq $off
	 * - pop callee regs
	 * - sub rsp, $val if depth > 0
	 * - pop rax
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);                          /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might occur
			 * still on the read-write image until we finally
			 * locked it as read-only. Both modifications on
			 * the given image are under text_mutex to avoid
			 * interference.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}
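
/*
 * 0x40 and 0x80 above are the ModRM mod field: mod=01 selects a disp8
 * following the ModRM byte, mod=10 a disp32; ptr_reg goes in r/m and
 * val_reg in reg, giving the base+displacement addressing used by the
 * load/store emitters below.
 */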

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
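/*
 * INSN_SZ_DIFF is how many bytes this BPF insn shrank relative to the
 * previous JIT pass: addrs[i] - addrs[i - 1] is its size last pass and
 * prog - temp is its size now. With jmp_padding enabled, that many nops
 * are emitted so the image layout (and thus the recorded addrs[]) stays
 * stable across passes.
 */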

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (image)
		memcpy(image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

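			/*
			 * x86 div uses rdx:rax as the implicit dividend and
			 * clobbers both registers, hence the save/restore
			 * around the operation.
			 */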
			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* test src_reg, src_reg */
				maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
				EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
				/* jne start_of_ldx */
				EMIT2(X86_JNE, 0);
				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JNE above */
				temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
				start_of_ldx = prog;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				u8 *branch_target;
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  dst_reg, AUX_REG, insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
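				/*
				 * The short jcc displacement is relative to
				 * the end of the 2-byte jne itself, hence the
				 * extra -2 above.
				 */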
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;

			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -(bpf_prog->aux->stack_depth + 8));
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image,
							  callee_regs_used,
							  bpf_prog->aux->stack_depth);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1552 | * keep the image from shrinking further. | |
1553 | * | |
1554 | * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond | |
1555 | * is 2 bytes, so the size difference is 4 bytes. | |
1556 | */ | |
1557 | nops = INSN_SZ_DIFF - 2; | |
1558 | if (nops != 0 && nops != 4) { | |
1559 | pr_err("unexpected jmp_cond padding: %d bytes\n", | |
1560 | nops); | |
1561 | return -EFAULT; | |
1562 | } | |
1563 | emit_nops(&prog, nops); | |
1564 | } | |
1565 | EMIT2(jmp_cond, jmp_offset); | |
1566 | } else if (is_simm32(jmp_offset)) { | |
1567 | EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); | |
1568 | } else { | |
1569 | pr_err("cond_jmp gen bug %llx\n", jmp_offset); | |
1570 | return -EFAULT; | |
1571 | } | |
1572 | ||
1573 | break; | |
1574 | ||
1575 | case BPF_JMP | BPF_JA: | |
1576 | if (insn->off == -1) | |
1577 | /* -1 jmp instructions will always jump | |
1578 | * backwards two bytes. Explicitly handling | |
1579 | * this case avoids wasting too many passes | |
1580 | * when there are long sequences of replaced | |
1581 | * dead code. | |
1582 | */ | |
1583 | jmp_offset = -2; | |
1584 | else | |
1585 | jmp_offset = addrs[i + insn->off] - addrs[i]; | |
1586 | ||
1587 | if (!jmp_offset) { | |
1588 | /* | |
1589 | * If jmp_padding is enabled, the extra nops will | |
1590 | * be inserted. Otherwise, optimize out nop jumps. | |
1591 | */ | |
1592 | if (jmp_padding) { | |
1593 | /* There are 3 possible conditions. | |
1594 | * (1) This BPF_JA is already optimized out in | |
1595 | * the previous run, so there is no need | |
1596 | * to pad any extra byte (0 byte). | |
1597 | * (2) The previous pass emits an imm8 jmp, | |
1598 | * so we pad 2 bytes to match the previous | |
1599 | * insn size. | |
1600 | * (3) Similarly, the previous pass emits an | |
1601 | * imm32 jmp, and 5 bytes is padded. | |
1602 | */ | |
1603 | nops = INSN_SZ_DIFF; | |
1604 | if (nops != 0 && nops != 2 && nops != 5) { | |
1605 | pr_err("unexpected nop jump padding: %d bytes\n", | |
1606 | nops); | |
1607 | return -EFAULT; | |
1608 | } | |
1609 | emit_nops(&prog, nops); | |
1610 | } | |
1611 | break; | |
1612 | } | |
1613 | emit_jmp: | |
1614 | if (is_imm8(jmp_offset)) { | |
1615 | if (jmp_padding) { | |
1616 | /* To avoid breaking jmp_offset, the extra bytes | |
1617 | * are padded before the actual jmp insn, so | |
1618 | * 2 bytes is subtracted from INSN_SZ_DIFF. | |
1619 | * | |
1620 | * If the previous pass already emits an imm8 | |
1621 | * jmp, there is nothing to pad (0 byte). | |
1622 | * | |
1623 | * If it emits an imm32 jmp (5 bytes) previously | |
1624 | * and now an imm8 jmp (2 bytes), then we pad | |
1625 | * (5 - 2 = 3) bytes to stop the image from | |
1626 | * shrinking further. | |
1627 | */ | |
1628 | nops = INSN_SZ_DIFF - 2; | |
1629 | if (nops != 0 && nops != 3) { | |
1630 | pr_err("unexpected jump padding: %d bytes\n", | |
1631 | nops); | |
1632 | return -EFAULT; | |
1633 | } | |
1634 | emit_nops(&prog, INSN_SZ_DIFF - 2); | |
1635 | } | |
1636 | EMIT2(0xEB, jmp_offset); | |
1637 | } else if (is_simm32(jmp_offset)) { | |
1638 | EMIT1_off32(0xE9, jmp_offset); | |
1639 | } else { | |
1640 | pr_err("jmp gen bug %llx\n", jmp_offset); | |
1641 | return -EFAULT; | |
1642 | } | |
1643 | break; | |
1644 | ||
1645 | case BPF_JMP | BPF_EXIT: | |
1646 | if (seen_exit) { | |
1647 | jmp_offset = ctx->cleanup_addr - addrs[i]; | |
1648 | goto emit_jmp; | |
1649 | } | |
1650 | seen_exit = true; | |
1651 | /* Update cleanup_addr */ | |
1652 | ctx->cleanup_addr = proglen; | |
1653 | pop_callee_regs(&prog, callee_regs_used); | |
1654 | EMIT1(0xC9); /* leave */ | |
1655 | EMIT1(0xC3); /* ret */ | |
1656 | break; | |
1657 | ||
1658 | default: | |
1659 | /* | |
1660 | * By design the x86-64 JIT should support all BPF instructions. | |
1661 | * This error will be seen if a new instruction was added | |
1662 | * to the interpreter but not to the JIT, or if there is | |
1663 | * junk in bpf_prog. | |
1664 | */ | |
1665 | pr_err("bpf_jit: unknown opcode %02x\n", insn->code); | |
1666 | return -EINVAL; | |
1667 | } | |
1668 | ||
1669 | ilen = prog - temp; | |
1670 | if (ilen > BPF_MAX_INSN_SIZE) { | |
1671 | pr_err("bpf_jit: fatal insn size error\n"); | |
1672 | return -EFAULT; | |
1673 | } | |
1674 | ||
1675 | if (image) { | |
1676 | /* | |
1677 | * When populating the image, assert that: | |
1678 | * | |
1679 | * i) We do not write beyond the allocated space, and | |
1680 | * ii) addrs[i] did not change from the prior run, in order | |
1681 | * to validate assumptions made for computing branch | |
1682 | * displacements. | |
1683 | */ | |
1684 | if (unlikely(proglen + ilen > oldproglen || | |
1685 | proglen + ilen != addrs[i])) { | |
1686 | pr_err("bpf_jit: fatal error\n"); | |
1687 | return -EFAULT; | |
1688 | } | |
1689 | memcpy(image + proglen, temp, ilen); | |
1690 | } | |
1691 | proglen += ilen; | |
1692 | addrs[i] = proglen; | |
1693 | prog = temp; | |
1694 | } | |
1695 | ||
1696 | if (image && excnt != bpf_prog->aux->num_exentries) { | |
1697 | pr_err("extable is not populated\n"); | |
1698 | return -EFAULT; | |
1699 | } | |
1700 | return proglen; | |
1701 | } | |
1702 | ||
1703 | static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, | |
1704 | int stack_size) | |
1705 | { | |
1706 | int i; | |
1707 | /* Store function arguments to stack. | |
1708 | * For a function that accepts two pointers the sequence will be: | |
1709 | * mov QWORD PTR [rbp-0x10],rdi | |
1710 | * mov QWORD PTR [rbp-0x8],rsi | |
1711 | */ | |
1712 | for (i = 0; i < min(nr_args, 6); i++) | |
1713 | emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), | |
1714 | BPF_REG_FP, | |
1715 | i == 5 ? X86_REG_R9 : BPF_REG_1 + i, | |
1716 | -(stack_size - i * 8)); | |
1717 | } | |
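| /* A sketch of what save_regs() emits (illustrative; the three-argument | |
| * signature below is hypothetical): for f(void *a, u32 b, void *c) with | |
| * stack_size == 24, the loop above produces | |
| * mov QWORD PTR [rbp-0x18], rdi // a, 8-byte store | |
| * mov DWORD PTR [rbp-0x10], esi // b, 4-byte store (arg_size == 4) | |
| * mov QWORD PTR [rbp-0x8], rdx // c, 8-byte store | |
| * A sixth argument is taken from r9 (X86_REG_R9), since only the five | |
| * BPF argument registers R1-R5 map directly onto x86-64 argument regs. | |
| */ | |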
1718 | ||
1719 | static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, | |
1720 | int stack_size) | |
1721 | { | |
1722 | int i; | |
1723 | ||
1724 | /* Restore function arguments from stack. | |
1725 | * For a function that accepts two pointers the sequence will be: | |
1726 | * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] | |
1727 | * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] | |
1728 | */ | |
1729 | for (i = 0; i < min(nr_args, 6); i++) | |
1730 | emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), | |
1731 | i == 5 ? X86_REG_R9 : BPF_REG_1 + i, | |
1732 | BPF_REG_FP, | |
1733 | -(stack_size - i * 8)); | |
1734 | } | |
1735 | ||
1736 | static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, | |
1737 | struct bpf_prog *p, int stack_size, bool mod_ret) | |
1738 | { | |
1739 | u8 *prog = *pprog; | |
1740 | u8 *jmp_insn; | |
1741 | ||
1742 | /* arg1: mov rdi, progs[i] */ | |
1743 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); | |
1744 | if (emit_call(&prog, | |
1745 | p->aux->sleepable ? __bpf_prog_enter_sleepable : | |
1746 | __bpf_prog_enter, prog)) | |
1747 | return -EINVAL; | |
1748 | /* remember prog start time returned by __bpf_prog_enter */ | |
1749 | emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); | |
1750 | ||
1751 | /* if (__bpf_prog_enter*(prog) == 0) | |
1752 | * goto skip_exec_of_prog; | |
1753 | */ | |
1754 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ | |
1755 | /* emit 2 nops that will be replaced with JE insn */ | |
1756 | jmp_insn = prog; | |
1757 | emit_nops(&prog, 2); | |
1758 | ||
1759 | /* arg1: lea rdi, [rbp - stack_size] */ | |
1760 | EMIT4(0x48, 0x8D, 0x7D, -stack_size); | |
1761 | /* arg2: progs[i]->insnsi for interpreter */ | |
1762 | if (!p->jited) | |
1763 | emit_mov_imm64(&prog, BPF_REG_2, | |
1764 | (long) p->insnsi >> 32, | |
1765 | (u32) (long) p->insnsi); | |
1766 | /* call JITed bpf program or interpreter */ | |
1767 | if (emit_call(&prog, p->bpf_func, prog)) | |
1768 | return -EINVAL; | |
1769 | ||
1770 | /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return | |
1771 | * value of the previous call, which is then passed on the stack to | |
1772 | * the next BPF program. | |
1773 | */ | |
1774 | if (mod_ret) | |
1775 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); | |
1776 | ||
1777 | /* replace 2 nops with JE insn, since jmp target is known */ | |
1778 | jmp_insn[0] = X86_JE; | |
1779 | jmp_insn[1] = prog - jmp_insn - 2; | |
1780 | ||
1781 | /* arg1: mov rdi, progs[i] */ | |
1782 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); | |
1783 | /* arg2: mov rsi, rbx <- start time in nsec */ | |
1784 | emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); | |
1785 | if (emit_call(&prog, | |
1786 | p->aux->sleepable ? __bpf_prog_exit_sleepable : | |
1787 | __bpf_prog_exit, prog)) | |
1788 | return -EINVAL; | |
1789 | ||
1790 | *pprog = prog; | |
1791 | return 0; | |
1792 | } | |
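| /* Rough shape of the code invoke_bpf_prog() emits (illustrative sketch): | |
| * movabs rdi, p | |
| * call __bpf_prog_enter(_sleepable) | |
| * mov rbx, rax // start time | |
| * test rax, rax | |
| * je skip // patched over the two nops | |
| * lea rdi, [rbp - stack_size] | |
| * movabs rsi, p->insnsi // only if !p->jited | |
| * call p->bpf_func | |
| * mov [rbp - 8], rax // only if mod_ret | |
| * skip: movabs rdi, p | |
| * mov rsi, rbx | |
| * call __bpf_prog_exit(_sleepable) | |
| */ | |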
1793 | ||
1794 | static void emit_align(u8 **pprog, u32 align) | |
1795 | { | |
1796 | u8 *target, *prog = *pprog; | |
1797 | ||
1798 | target = PTR_ALIGN(prog, align); | |
1799 | if (target != prog) | |
1800 | emit_nops(&prog, target - prog); | |
1801 | ||
1802 | *pprog = prog; | |
1803 | } | |
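| /* For example (illustrative): if prog sits at an address ending in 0x23 | |
| * and align is 16, PTR_ALIGN rounds up to ...0x30 and 13 NOP bytes are | |
| * emitted; if prog is already aligned, nothing is emitted. | |
| */ | |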
1804 | ||
1805 | static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) | |
1806 | { | |
1807 | u8 *prog = *pprog; | |
1808 | s64 offset; | |
1809 | ||
1810 | offset = func - (ip + 2 + 4); | |
1811 | if (!is_simm32(offset)) { | |
1812 | pr_err("Target %p is out of range\n", func); | |
1813 | return -EINVAL; | |
1814 | } | |
1815 | EMIT2_off32(0x0F, jmp_cond + 0x10, offset); | |
1816 | *pprog = prog; | |
1817 | return 0; | |
1818 | } | |
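| /* The emitted instruction is 6 bytes (0x0F, jmp_cond + 0x10, rel32), | |
| * which is why the displacement above is computed against ip + 2 + 4: | |
| * rel32 is relative to the first byte after the instruction. | |
| */ | |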
1819 | ||
1820 | static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, | |
1821 | struct bpf_tramp_progs *tp, int stack_size) | |
1822 | { | |
1823 | int i; | |
1824 | u8 *prog = *pprog; | |
1825 | ||
1826 | for (i = 0; i < tp->nr_progs; i++) { | |
1827 | if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false)) | |
1828 | return -EINVAL; | |
1829 | } | |
1830 | *pprog = prog; | |
1831 | return 0; | |
1832 | } | |
1833 | ||
1834 | static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, | |
1835 | struct bpf_tramp_progs *tp, int stack_size, | |
1836 | u8 **branches) | |
1837 | { | |
1838 | u8 *prog = *pprog; | |
1839 | int i; | |
1840 | ||
1841 | /* The first fmod_ret program will receive a garbage return value. | |
1842 | * Set this to 0 to avoid confusing the program. | |
1843 | */ | |
1844 | emit_mov_imm32(&prog, false, BPF_REG_0, 0); | |
1845 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); | |
1846 | for (i = 0; i < tp->nr_progs; i++) { | |
1847 | if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) | |
1848 | return -EINVAL; | |
1849 | ||
1850 | /* mod_ret prog stored return value into [rbp - 8]. Emit: | |
1851 | * if (*(u64 *)(rbp - 8) != 0) | |
1852 | * goto do_fexit; | |
1853 | */ | |
1854 | /* cmp QWORD PTR [rbp - 0x8], 0x0 */ | |
1855 | EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); | |
1856 | ||
1857 | /* Save the location of the branch and generate 6 nops | |
1858 | * (2 bytes for the jump opcode and 4 bytes for the offset). These nops | |
1859 | * are replaced with a conditional jump once do_fexit (i.e. the | |
1860 | * start of the fexit invocation) is finalized. | |
1861 | */ | |
1862 | branches[i] = prog; | |
1863 | emit_nops(&prog, 4 + 2); | |
1864 | } | |
1865 | ||
1866 | *pprog = prog; | |
1867 | return 0; | |
1868 | } | |
1869 | ||
1870 | /* Example: | |
1871 | * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); | |
1872 | * its 'struct btf_func_model' will have nr_args=2. | |
1873 | * The assembly code when eth_type_trans is executing after the trampoline: | |
1874 | * | |
1875 | * push rbp | |
1876 | * mov rbp, rsp | |
1877 | * sub rsp, 16 // space for skb and dev | |
1878 | * push rbx // temp regs to pass start time | |
1879 | * mov qword ptr [rbp - 16], rdi // save skb pointer to stack | |
1880 | * mov qword ptr [rbp - 8], rsi // save dev pointer to stack | |
1881 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
1882 | * mov rbx, rax // remember start time if bpf stats are enabled | |
1883 | * lea rdi, [rbp - 16] // R1==ctx of bpf prog | |
1884 | * call addr_of_jited_FENTRY_prog | |
1885 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
1886 | * mov rsi, rbx // prog start time | |
1887 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
1888 | * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack | |
1889 | * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack | |
1890 | * pop rbx | |
1891 | * leave | |
1892 | * ret | |
1893 | * | |
1894 | * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be | |
1895 | * replaced with 'call generated_bpf_trampoline'. When it returns, | |
1896 | * eth_type_trans will continue executing with the original skb and dev pointers. | |
1897 | * | |
1898 | * The assembly code when eth_type_trans is called from trampoline: | |
1899 | * | |
1900 | * push rbp | |
1901 | * mov rbp, rsp | |
1902 | * sub rsp, 24 // space for skb, dev, return value | |
1903 | * push rbx // temp regs to pass start time | |
1904 | * mov qword ptr [rbp - 24], rdi // save skb pointer to stack | |
1905 | * mov qword ptr [rbp - 16], rsi // save dev pointer to stack | |
1906 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
1907 | * mov rbx, rax // remember start time if bpf stats are enabled | |
1908 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog | |
1909 | * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev | |
1910 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
1911 | * mov rsi, rbx // prog start time | |
1912 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
1913 | * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack | |
1914 | * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack | |
1915 | * call eth_type_trans+5 // execute body of eth_type_trans | |
1916 | * mov qword ptr [rbp - 8], rax // save return value | |
1917 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
1918 | * mov rbx, rax // remember start time if bpf stats are enabled | |
1919 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog | |
1920 | * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value | |
1921 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
1922 | * mov rsi, rbx // prog start time | |
1923 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
1924 | * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value | |
1925 | * pop rbx | |
1926 | * leave | |
1927 | * add rsp, 8 // skip eth_type_trans's frame | |
1928 | * ret // return to its caller | |
1929 | */ | |
1930 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, | |
1931 | const struct btf_func_model *m, u32 flags, | |
1932 | struct bpf_tramp_progs *tprogs, | |
1933 | void *orig_call) | |
1934 | { | |
1935 | int ret, i, nr_args = m->nr_args; | |
1936 | int stack_size = nr_args * 8; | |
1937 | struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; | |
1938 | struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; | |
1939 | struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; | |
1940 | u8 **branches = NULL; | |
1941 | u8 *prog; | |
1942 | ||
1943 | /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ | |
1944 | if (nr_args > 6) | |
1945 | return -ENOTSUPP; | |
1946 | ||
1947 | if ((flags & BPF_TRAMP_F_RESTORE_REGS) && | |
1948 | (flags & BPF_TRAMP_F_SKIP_FRAME)) | |
1949 | return -EINVAL; | |
1950 | ||
1951 | if (flags & BPF_TRAMP_F_CALL_ORIG) | |
1952 | stack_size += 8; /* room for return value of orig_call */ | |
1953 | ||
1954 | if (flags & BPF_TRAMP_F_SKIP_FRAME) | |
1955 | /* skip the patched call instruction and point orig_call to the | |
1956 | * actual body of the kernel function. | |
1957 | */ | |
1958 | orig_call += X86_PATCH_SIZE; | |
1959 | ||
1960 | prog = image; | |
1961 | ||
1962 | EMIT1(0x55); /* push rbp */ | |
1963 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ | |
1964 | EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ | |
1965 | EMIT1(0x53); /* push rbx */ | |
1966 | ||
1967 | save_regs(m, &prog, nr_args, stack_size); | |
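| /* Illustrative stack layout at this point (assuming nr_args == 2 with | |
| * BPF_TRAMP_F_CALL_ORIG set, so stack_size == 24): | |
| * [rbp - 24] arg1 | |
| * [rbp - 16] arg2 | |
| * [rbp - 8] return value of orig_call / fmod_ret progs | |
| * rbx is pushed below that to carry the start time between | |
| * __bpf_prog_enter and __bpf_prog_exit. | |
| */ | |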
1968 | ||
1969 | if (flags & BPF_TRAMP_F_CALL_ORIG) { | |
1970 | /* arg1: mov rdi, im */ | |
1971 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); | |
1972 | if (emit_call(&prog, __bpf_tramp_enter, prog)) { | |
1973 | ret = -EINVAL; | |
1974 | goto cleanup; | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | if (fentry->nr_progs) | |
1979 | if (invoke_bpf(m, &prog, fentry, stack_size)) | |
1980 | return -EINVAL; | |
1981 | ||
1982 | if (fmod_ret->nr_progs) { | |
1983 | branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), | |
1984 | GFP_KERNEL); | |
1985 | if (!branches) | |
1986 | return -ENOMEM; | |
1987 | ||
1988 | if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size, | |
1989 | branches)) { | |
1990 | ret = -EINVAL; | |
1991 | goto cleanup; | |
1992 | } | |
1993 | } | |
1994 | ||
1995 | if (flags & BPF_TRAMP_F_CALL_ORIG) { | |
1996 | restore_regs(m, &prog, nr_args, stack_size); | |
1997 | ||
1998 | /* call original function */ | |
1999 | if (emit_call(&prog, orig_call, prog)) { | |
2000 | ret = -EINVAL; | |
2001 | goto cleanup; | |
2002 | } | |
2003 | /* remember return value on the stack for the bpf prog to access */ | |
2004 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); | |
2005 | im->ip_after_call = prog; | |
2006 | memcpy(prog, x86_nops[5], X86_PATCH_SIZE); | |
2007 | prog += X86_PATCH_SIZE; | |
2008 | } | |
2009 | ||
2010 | if (fmod_ret->nr_progs) { | |
2011 | /* From Intel 64 and IA-32 Architectures Optimization | |
2012 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler | |
2013 | * Coding Rule 11: All branch targets should be 16-byte | |
2014 | * aligned. | |
2015 | */ | |
2016 | emit_align(&prog, 16); | |
2017 | /* Update the branches saved in invoke_bpf_mod_ret with the | |
2018 | * aligned address of do_fexit. | |
2019 | */ | |
2020 | for (i = 0; i < fmod_ret->nr_progs; i++) | |
2021 | emit_cond_near_jump(&branches[i], prog, branches[i], | |
2022 | X86_JNE); | |
2023 | } | |
2024 | ||
2025 | if (fexit->nr_progs) | |
2026 | if (invoke_bpf(m, &prog, fexit, stack_size)) { | |
2027 | ret = -EINVAL; | |
2028 | goto cleanup; | |
2029 | } | |
2030 | ||
2031 | if (flags & BPF_TRAMP_F_RESTORE_REGS) | |
2032 | restore_regs(m, &prog, nr_args, stack_size); | |
2033 | ||
2034 | /* This needs to be done regardless. If there were fmod_ret programs, | |
2035 | * the return value is only updated on the stack and still needs to be | |
2036 | * restored to R0. | |
2037 | */ | |
2038 | if (flags & BPF_TRAMP_F_CALL_ORIG) { | |
2039 | im->ip_epilogue = prog; | |
2040 | /* arg1: mov rdi, im */ | |
2041 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); | |
2042 | if (emit_call(&prog, __bpf_tramp_exit, prog)) { | |
2043 | ret = -EINVAL; | |
2044 | goto cleanup; | |
2045 | } | |
2046 | /* restore original return value back into RAX */ | |
2047 | emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); | |
2048 | } | |
2049 | ||
2050 | EMIT1(0x5B); /* pop rbx */ | |
2051 | EMIT1(0xC9); /* leave */ | |
2052 | if (flags & BPF_TRAMP_F_SKIP_FRAME) | |
2053 | /* skip our return address and return to parent */ | |
2054 | EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ | |
2055 | EMIT1(0xC3); /* ret */ | |
2056 | /* Make sure the trampoline generation logic doesn't overflow */ | |
2057 | if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { | |
2058 | ret = -EFAULT; | |
2059 | goto cleanup; | |
2060 | } | |
2061 | ret = prog - (u8 *)image; | |
2062 | ||
2063 | cleanup: | |
2064 | kfree(branches); | |
2065 | return ret; | |
2066 | } | |
2067 | ||
2068 | static int emit_fallback_jump(u8 **pprog) | |
2069 | { | |
2070 | u8 *prog = *pprog; | |
2071 | int err = 0; | |
2072 | ||
2073 | #ifdef CONFIG_RETPOLINE | |
2074 | /* Note that this assumes that the compiler uses external | |
2075 | * thunks for indirect calls. Both clang and GCC use the same | |
2076 | * naming convention for external thunks. | |
2077 | */ | |
2078 | err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog); | |
2079 | #else | |
2080 | EMIT2(0xFF, 0xE2); /* jmp rdx */ | |
2081 | #endif | |
2082 | *pprog = prog; | |
2083 | return err; | |
2084 | } | |
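| /* __x86_indirect_thunk_rdx is the compiler-provided retpoline thunk; it | |
| * performs a speculation-safe indirect jump to the address held in rdx, | |
| * which the dispatcher below uses for the target program pointer. | |
| */ | |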
2085 | ||
2086 | static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) | |
2087 | { | |
2088 | u8 *jg_reloc, *prog = *pprog; | |
2089 | int pivot, err, jg_bytes = 1; | |
2090 | s64 jg_offset; | |
2091 | ||
2092 | if (a == b) { | |
2093 | /* Leaf node of recursion, i.e. not a range of indices | |
2094 | * anymore. | |
2095 | */ | |
2096 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ | |
2097 | if (!is_simm32(progs[a])) | |
2098 | return -1; | |
2099 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), | |
2100 | progs[a]); | |
2101 | err = emit_cond_near_jump(&prog, /* je func */ | |
2102 | (void *)progs[a], prog, | |
2103 | X86_JE); | |
2104 | if (err) | |
2105 | return err; | |
2106 | ||
2107 | err = emit_fallback_jump(&prog); /* jmp thunk/indirect */ | |
2108 | if (err) | |
2109 | return err; | |
2110 | ||
2111 | *pprog = prog; | |
2112 | return 0; | |
2113 | } | |
2114 | ||
2115 | /* Not a leaf node, so we pivot and recursively descend into | |
2116 | * the lower and upper ranges. | |
2117 | */ | |
2118 | pivot = (b - a) / 2; | |
2119 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ | |
2120 | if (!is_simm32(progs[a + pivot])) | |
2121 | return -1; | |
2122 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); | |
2123 | ||
2124 | if (pivot > 2) { /* jg upper_part */ | |
2125 | /* Require near jump. */ | |
2126 | jg_bytes = 4; | |
2127 | EMIT2_off32(0x0F, X86_JG + 0x10, 0); | |
2128 | } else { | |
2129 | EMIT2(X86_JG, 0); | |
2130 | } | |
2131 | jg_reloc = prog; | |
2132 | ||
2133 | err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ | |
2134 | progs); | |
2135 | if (err) | |
2136 | return err; | |
2137 | ||
2138 | /* From Intel 64 and IA-32 Architectures Optimization | |
2139 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler | |
2140 | * Coding Rule 11: All branch targets should be 16-byte | |
2141 | * aligned. | |
2142 | */ | |
2143 | emit_align(&prog, 16); | |
2144 | jg_offset = prog - jg_reloc; | |
2145 | emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); | |
2146 | ||
2147 | err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ | |
2148 | b, progs); | |
2149 | if (err) | |
2150 | return err; | |
2151 | ||
2152 | *pprog = prog; | |
2153 | return 0; | |
2154 | } | |
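| /* Illustrative expansion (not from the original source): for four sorted | |
| * targets f0 < f1 < f2 < f3, the recursion emits roughly | |
| * cmp rdx, f1; jg 2f | |
| * cmp rdx, f0; jg 1f | |
| * cmp rdx, f0; je f0; jmp fallback | |
| * 1: cmp rdx, f1; je f1; jmp fallback | |
| * 2: cmp rdx, f2; jg 3f | |
| * cmp rdx, f2; je f2; jmp fallback | |
| * 3: cmp rdx, f3; je f3; jmp fallback | |
| * i.e. a binary search over the sorted target addresses. | |
| */ | |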
2155 | ||
2156 | static int cmp_ips(const void *a, const void *b) | |
2157 | { | |
2158 | const s64 *ipa = a; | |
2159 | const s64 *ipb = b; | |
2160 | ||
2161 | if (*ipa > *ipb) | |
2162 | return 1; | |
2163 | if (*ipa < *ipb) | |
2164 | return -1; | |
2165 | return 0; | |
2166 | } | |
2167 | ||
2168 | int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) | |
2169 | { | |
2170 | u8 *prog = image; | |
2171 | ||
2172 | sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); | |
2173 | return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); | |
2174 | } | |
2175 | ||
2176 | struct x64_jit_data { | |
2177 | struct bpf_binary_header *header; | |
2178 | int *addrs; | |
2179 | u8 *image; | |
2180 | int proglen; | |
2181 | struct jit_context ctx; | |
2182 | }; | |
2183 | ||
2184 | #define MAX_PASSES 20 | |
2185 | #define PADDING_PASSES (MAX_PASSES - 5) | |
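| /* If the image has not converged within PADDING_PASSES iterations, the | |
| * remaining passes run with NOP padding enabled (see do_jit()), which | |
| * stops instructions from shrinking further and forces convergence. | |
| */ | |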
2186 | ||
2187 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | |
2188 | { | |
2189 | struct bpf_binary_header *header = NULL; | |
2190 | struct bpf_prog *tmp, *orig_prog = prog; | |
2191 | struct x64_jit_data *jit_data; | |
2192 | int proglen, oldproglen = 0; | |
2193 | struct jit_context ctx = {}; | |
2194 | bool tmp_blinded = false; | |
2195 | bool extra_pass = false; | |
2196 | bool padding = false; | |
2197 | u8 *image = NULL; | |
2198 | int *addrs; | |
2199 | int pass; | |
2200 | int i; | |
2201 | ||
2202 | if (!prog->jit_requested) | |
2203 | return orig_prog; | |
2204 | ||
2205 | tmp = bpf_jit_blind_constants(prog); | |
2206 | /* | |
2207 | * If blinding was requested and we failed during blinding, | |
2208 | * we must fall back to the interpreter. | |
2209 | */ | |
2210 | if (IS_ERR(tmp)) | |
2211 | return orig_prog; | |
2212 | if (tmp != prog) { | |
2213 | tmp_blinded = true; | |
2214 | prog = tmp; | |
2215 | } | |
2216 | ||
2217 | jit_data = prog->aux->jit_data; | |
2218 | if (!jit_data) { | |
2219 | jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); | |
2220 | if (!jit_data) { | |
2221 | prog = orig_prog; | |
2222 | goto out; | |
2223 | } | |
2224 | prog->aux->jit_data = jit_data; | |
2225 | } | |
2226 | addrs = jit_data->addrs; | |
2227 | if (addrs) { | |
2228 | ctx = jit_data->ctx; | |
2229 | oldproglen = jit_data->proglen; | |
2230 | image = jit_data->image; | |
2231 | header = jit_data->header; | |
2232 | extra_pass = true; | |
2233 | padding = true; | |
2234 | goto skip_init_addrs; | |
2235 | } | |
2236 | addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); | |
2237 | if (!addrs) { | |
2238 | prog = orig_prog; | |
2239 | goto out_addrs; | |
2240 | } | |
2241 | ||
2242 | /* | |
2243 | * Before the first pass, make a rough estimate of addrs[]: | |
2244 | * each BPF instruction is translated to fewer than 64 bytes. | |
2245 | */ | |
2246 | for (proglen = 0, i = 0; i <= prog->len; i++) { | |
2247 | proglen += 64; | |
2248 | addrs[i] = proglen; | |
2249 | } | |
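| /* e.g. a 3-insn program starts with addrs[] = {64, 128, 192, 256} and | |
| * proglen == 256: a deliberate over-estimate that later passes shrink. | |
| */ | |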
2250 | ctx.cleanup_addr = proglen; | |
2251 | skip_init_addrs: | |
2252 | ||
2253 | /* | |
2254 | * The JITed image shrinks with every pass and the loop iterates | |
2255 | * until the image stops shrinking. Very large BPF programs | |
2256 | * may converge only on the last pass. In such a case, do one more | |
2257 | * pass to emit the final image. | |
2258 | */ | |
2259 | for (pass = 0; pass < MAX_PASSES || image; pass++) { | |
2260 | if (!padding && pass >= PADDING_PASSES) | |
2261 | padding = true; | |
2262 | proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding); | |
2263 | if (proglen <= 0) { | |
2264 | out_image: | |
2265 | image = NULL; | |
2266 | if (header) | |
2267 | bpf_jit_binary_free(header); | |
2268 | prog = orig_prog; | |
2269 | goto out_addrs; | |
2270 | } | |
2271 | if (image) { | |
2272 | if (proglen != oldproglen) { | |
2273 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", | |
2274 | proglen, oldproglen); | |
2275 | goto out_image; | |
2276 | } | |
2277 | break; | |
2278 | } | |
2279 | if (proglen == oldproglen) { | |
2280 | /* | |
2281 | * The number of entries in extable is the number of BPF_LDX | |
2282 | * insns that access kernel memory via "pointer to BTF type". | |
2283 | * The verifier changed their opcode from LDX|MEM|size | |
2284 | * to LDX|PROBE_MEM|size to make JITing easier. | |
2285 | */ | |
2286 | u32 align = __alignof__(struct exception_table_entry); | |
2287 | u32 extable_size = prog->aux->num_exentries * | |
2288 | sizeof(struct exception_table_entry); | |
2289 | ||
2290 | /* allocate module memory for x86 insns and extable */ | |
2291 | header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size, | |
2292 | &image, align, jit_fill_hole); | |
2293 | if (!header) { | |
2294 | prog = orig_prog; | |
2295 | goto out_addrs; | |
2296 | } | |
2297 | prog->aux->extable = (void *) image + roundup(proglen, align); | |
2298 | } | |
2299 | oldproglen = proglen; | |
2300 | cond_resched(); | |
2301 | } | |
2302 | ||
2303 | if (bpf_jit_enable > 1) | |
2304 | bpf_jit_dump(prog->len, proglen, pass + 1, image); | |
2305 | ||
2306 | if (image) { | |
2307 | if (!prog->is_func || extra_pass) { | |
2308 | bpf_tail_call_direct_fixup(prog); | |
2309 | bpf_jit_binary_lock_ro(header); | |
2310 | } else { | |
2311 | jit_data->addrs = addrs; | |
2312 | jit_data->ctx = ctx; | |
2313 | jit_data->proglen = proglen; | |
2314 | jit_data->image = image; | |
2315 | jit_data->header = header; | |
2316 | } | |
2317 | prog->bpf_func = (void *)image; | |
2318 | prog->jited = 1; | |
2319 | prog->jited_len = proglen; | |
2320 | } else { | |
2321 | prog = orig_prog; | |
2322 | } | |
2323 | ||
2324 | if (!image || !prog->is_func || extra_pass) { | |
2325 | if (image) | |
2326 | bpf_prog_fill_jited_linfo(prog, addrs + 1); | |
2327 | out_addrs: | |
2328 | kvfree(addrs); | |
2329 | kfree(jit_data); | |
2330 | prog->aux->jit_data = NULL; | |
2331 | } | |
2332 | out: | |
2333 | if (tmp_blinded) | |
2334 | bpf_jit_prog_release_other(prog, prog == orig_prog ? | |
2335 | tmp : orig_prog); | |
2336 | return prog; | |
2337 | } | |
2338 | ||
2339 | bool bpf_jit_supports_kfunc_call(void) | |
2340 | { | |
2341 | return true; | |
2342 | } |