// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
static bool all_callee_regs_used[4] = {true, true, true, true};
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
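/*
 * Example: EMIT3(0x48, 0x89, 0xE5) packs the bytes little-endian into one
 * u32 (0x48 | 0x89 << 8 | 0xE5 << 16) and hands it to emit_code() with
 * len = 3, which copies exactly three bytes into the image -- here the
 * encoding of "mov rbp, rsp".
 */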
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
#else
#define EMIT_ENDBR()
#define EMIT_ENDBR_POISON()
#endif
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}
/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
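/*
 * For example, the short form of "je" (X86_JE, used below) is opcode 0x74
 * followed by an s8 displacement; emitting 0x0F and then 0x74 + 0x10 = 0x84
 * gives the s32 form of the same conditional jump.
 */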
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8 */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
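/*
 * Example: BPF_REG_0 and BPF_REG_5 both map to encoding 0 above; what tells
 * RAX apart from R8 (which shares the low three ModRM bits) is the REX
 * extension bit added by add_1mod()/add_2mod() whenever is_ereg() is true.
 */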
static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};
/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}
/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	       (1 << reg) & (BIT(BPF_REG_1) |
			     BIT(BPF_REG_2) |
			     BIT(BPF_REG_FP));
}
static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}
/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}
/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
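/*
 * Worked example: add_2reg(0xC0, BPF_REG_1, BPF_REG_2) yields
 * 0xC0 + 7 + (6 << 3) = 0xF7, i.e. a ModRM byte with mod=11, reg=rsi,
 * rm=rdi; together with the 0x89 opcode and a REX.W prefix that encodes
 * "mov rdi, rsi" (dst_reg lands in the r/m field, src_reg in reg).
 */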
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}
int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}
struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
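/*
 * The 11 fixed bytes skipped on a tail call are, as emitted by
 * emit_prologue() below: the 5-byte patchable nop area, the 2-byte
 * "xor eax, eax" (or nop2) that initialises tail_call_cnt, "push rbp"
 * (1 byte) and "mov rbp, rsp" (3 bytes); ENDBR_INSN_SIZE accounts for the
 * optional endbr emitted in IBT builds.
 */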
static void push_r12(u8 **pprog)
{
	EMIT2(0x41, 0x54);	   /* push r12 */
}
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
}
static void pop_r12(u8 **pprog)
{
	EMIT2(0x41, 0x5C);	   /* pop r12 */
}
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
}
static void emit_nops(u8 **pprog, int len)
{
	if (noplen > ASM_NOP_MAX)
		noplen = ASM_NOP_MAX;

	for (i = 0; i < noplen; i++)
		EMIT1(x86_nops[noplen][i]);
}
/*
 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
 * in arch/x86/kernel/alternative.c
 */
static void emit_fineibt(u8 **pprog, u32 hash)
{
	EMIT3_off32(0x41, 0x81, 0xea, hash);	/* subl $hash, %r10d	*/
	EMIT2(0x74, 0x07);			/* jz.d8 +7		*/
	EMIT2(0x0f, 0x0b);			/* ud2			*/
	EMIT1(0x90);				/* nop			*/
}
static void emit_kcfi(u8 **pprog, u32 hash)
{
	EMIT1_off32(0xb8, hash);		/* movl $hash, %eax	*/
#ifdef CONFIG_CALL_PADDING
static void emit_cfi(u8 **pprog, u32 hash)
{
	switch (cfi_mode) {
	case CFI_FINEIBT:
		emit_fineibt(&prog, hash);
		break;

	case CFI_KCFI:
		emit_kcfi(&prog, hash);
		break;
	}
}
/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog,
			  bool is_exception_cb)
{
	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	emit_nops(&prog, X86_PATCH_SIZE);
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole tailcall context,
			 * zeroing rax means initialising tail_call_cnt.
			 */
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			/* Keep the same instruction layout. */
			EMIT2(0x66, 0x90); /* nop2 */
	}
	/* Exception callback receives FP as third parameter */
	if (is_exception_cb) {
		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
		/* The main frame must have exception_boundary as true, so we
		 * first restore those callee-saved regs from stack, before
		 * reusing the stack frame.
		 */
		pop_callee_regs(&prog, all_callee_regs_used);
		/* Reset the stack frame. */
		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
	} else {
		EMIT1(0x55);             /* push rbp */
		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	}

	/* X86_TAIL_CALL_OFFSET is here */

	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
}
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	return 0;
}
static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	x86_call_depth_emit_accounting(pprog, func);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
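/*
 * Example: a direct call produced by emit_call() is the 5-byte "call rel32"
 * form, opcode 0xE8 followed by a 32-bit displacement measured from the end
 * of the instruction, i.e. rel32 = func - (ip + X86_PATCH_SIZE); emit_jump()
 * is identical except for the 0xE9 "jmp rel32" opcode.
 */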
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	ret = t == BPF_MOD_CALL ?
	      emit_call(&prog, old_addr, ip) :
	      emit_jump(&prog, old_addr, ip);

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	ret = t == BPF_MOD_CALL ?
	      emit_call(&prog, new_addr, ip) :
	      emit_jump(&prog, new_addr, ip);

	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
	}
out:
	mutex_unlock(&text_mutex);
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}
#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);		/* int3 */
	}
}
static void emit_return(u8 **pprog, u8 *ip)
{
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);	/* int3 */
	}
}
/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
					u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries) */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
	}

	EMIT1(0x58);                              /* pop rax */
	EMIT3_off32(0x48, 0x81, 0xC4,             /* add rsp, sd */
		    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);

	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
}
static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
				      struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;

	/* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
	}

	EMIT1(0x58);                              /* pop rax */
	EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	emit_nops(&prog, X86_PATCH_SIZE);

	/* out: */
	ctx->tail_call_direct_label = prog - start;
}
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];

		ret = __bpf_arch_text_poke(poke->tailcall_target,
					   BPF_MOD_JUMP, NULL,
					   (u8 *)target->bpf_func +
					   poke->adj_off);
		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
					   BPF_MOD_JUMP,
					   (u8 *)poke->tailcall_target +
					   X86_PATCH_SIZE, NULL);

		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 */
	if (is_ereg(dst_reg))
		EMIT1(add_2mod(0x40, dst_reg, dst_reg));
	EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
}
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}
}
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}
}
static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
			   u32 src_reg)
{
	if (is64) {
		/* movs[b,w,l]q dst, src */
		if (num_bits == 8)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 16)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 32)
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
			      add_2reg(0xC0, src_reg, dst_reg));
	} else {
		/* movs[b,w]l dst, src */
		if (num_bits == 8) {
			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		} else if (num_bits == 16) {
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, src_reg, dst_reg));
			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		}
	}
}
/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
}
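/*
 * Example: "mov rax, qword ptr [rdi + 0x14]" uses the 0x40-based ModR/M form
 * plus a single displacement byte because 0x14 fits in s8, whereas an offset
 * of 0x1000 would select the 0x80-based form followed by a full 32-bit
 * displacement.
 */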
/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
}
/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
}
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
}
/* LDSX: dst_reg = *(s8*)(src_reg + off) */
static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	switch (size) {
	case BPF_B:
		/* Emit 'movsx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
		break;
	case BPF_H:
		/* Emit 'movsx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
		break;
	case BPF_W:
		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
}
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
}
static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);
}
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
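/*
 * Example: for a faulting BPF_PROBE_MEM load into R6 (mapped to rbx),
 * do_jit() below stores fixup = x86_insn_size | (offsetof(struct pt_regs, bx) << 8),
 * so this handler zeroes regs->bx and advances regs->ip past the 'mov' that
 * faulted.
 */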
static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}
/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bit of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |  pp   |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
}
/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
}
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack)				\
	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
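/*
 * The "- 8" works because emit_prologue() pushes rax (the tail_call_cnt
 * slot) immediately after "sub rsp, round_up(stack_depth, 8)", so the
 * counter sits one quadword below the rounded stack area relative to rbp.
 */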
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int ilen, proglen = 0;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
	/* Exception callback will clobber callee regs for its own use, and
	 * restore the original callee regs from main prog's stack frame.
	 */
	if (bpf_prog->aux->exception_boundary) {
		/* We also need to save r12, which is not mapped to any BPF
		 * register, as we throw after entry into the kernel, which may
		 * clobber it.
		 */
		push_r12(&prog);
		push_callee_regs(&prog, all_callee_regs_used);
	} else {
		push_callee_regs(&prog, callee_regs_used);
	}

	memcpy(rw_image + proglen, temp, ilen);
	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;

		switch (insn->code) {
		/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			if (insn->off == 0)
				emit_mov_reg(&prog,
					     BPF_CLASS(insn->code) == BPF_ALU64,
					     dst_reg, src_reg);
			else
				emit_movsx_reg(&prog, insn->off,
					       BPF_CLASS(insn->code) == BPF_ALU64,
					       dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
1235 case BPF_ALU
| BPF_ADD
| BPF_K
:
1236 case BPF_ALU
| BPF_SUB
| BPF_K
:
1237 case BPF_ALU
| BPF_AND
| BPF_K
:
1238 case BPF_ALU
| BPF_OR
| BPF_K
:
1239 case BPF_ALU
| BPF_XOR
| BPF_K
:
1240 case BPF_ALU64
| BPF_ADD
| BPF_K
:
1241 case BPF_ALU64
| BPF_SUB
| BPF_K
:
1242 case BPF_ALU64
| BPF_AND
| BPF_K
:
1243 case BPF_ALU64
| BPF_OR
| BPF_K
:
1244 case BPF_ALU64
| BPF_XOR
| BPF_K
:
1245 maybe_emit_1mod(&prog
, dst_reg
,
1246 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1249 * b3 holds 'normal' opcode, b2 short form only valid
1250 * in case dst is eax/rax.
1252 switch (BPF_OP(insn
->code
)) {
1276 EMIT3(0x83, add_1reg(b3
, dst_reg
), imm32
);
1277 else if (is_axreg(dst_reg
))
1278 EMIT1_off32(b2
, imm32
);
1280 EMIT2_off32(0x81, add_1reg(b3
, dst_reg
), imm32
);
1283 case BPF_ALU64
| BPF_MOV
| BPF_K
:
1284 case BPF_ALU
| BPF_MOV
| BPF_K
:
1285 emit_mov_imm32(&prog
, BPF_CLASS(insn
->code
) == BPF_ALU64
,
1289 case BPF_LD
| BPF_IMM
| BPF_DW
:
1290 emit_mov_imm64(&prog
, dst_reg
, insn
[1].imm
, insn
[0].imm
);
1295 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1296 case BPF_ALU
| BPF_MOD
| BPF_X
:
1297 case BPF_ALU
| BPF_DIV
| BPF_X
:
1298 case BPF_ALU
| BPF_MOD
| BPF_K
:
1299 case BPF_ALU
| BPF_DIV
| BPF_K
:
1300 case BPF_ALU64
| BPF_MOD
| BPF_X
:
1301 case BPF_ALU64
| BPF_DIV
| BPF_X
:
1302 case BPF_ALU64
| BPF_MOD
| BPF_K
:
1303 case BPF_ALU64
| BPF_DIV
| BPF_K
: {
1304 bool is64
= BPF_CLASS(insn
->code
) == BPF_ALU64
;
1306 if (dst_reg
!= BPF_REG_0
)
1307 EMIT1(0x50); /* push rax */
1308 if (dst_reg
!= BPF_REG_3
)
1309 EMIT1(0x52); /* push rdx */
1311 if (BPF_SRC(insn
->code
) == BPF_X
) {
1312 if (src_reg
== BPF_REG_0
||
1313 src_reg
== BPF_REG_3
) {
1314 /* mov r11, src_reg */
1315 EMIT_mov(AUX_REG
, src_reg
);
1319 /* mov r11, imm32 */
1320 EMIT3_off32(0x49, 0xC7, 0xC3, imm32
);
1324 if (dst_reg
!= BPF_REG_0
)
1325 /* mov rax, dst_reg */
1326 emit_mov_reg(&prog
, is64
, BPF_REG_0
, dst_reg
);
1328 if (insn
->off
== 0) {
1331 * equivalent to 'xor rdx, rdx', but one byte less
1336 maybe_emit_1mod(&prog
, src_reg
, is64
);
1337 EMIT2(0xF7, add_1reg(0xF0, src_reg
));
1339 if (BPF_CLASS(insn
->code
) == BPF_ALU
)
1340 EMIT1(0x99); /* cdq */
1342 EMIT2(0x48, 0x99); /* cqo */
1345 maybe_emit_1mod(&prog
, src_reg
, is64
);
1346 EMIT2(0xF7, add_1reg(0xF8, src_reg
));
1349 if (BPF_OP(insn
->code
) == BPF_MOD
&&
1350 dst_reg
!= BPF_REG_3
)
1351 /* mov dst_reg, rdx */
1352 emit_mov_reg(&prog
, is64
, dst_reg
, BPF_REG_3
);
1353 else if (BPF_OP(insn
->code
) == BPF_DIV
&&
1354 dst_reg
!= BPF_REG_0
)
1355 /* mov dst_reg, rax */
1356 emit_mov_reg(&prog
, is64
, dst_reg
, BPF_REG_0
);
1358 if (dst_reg
!= BPF_REG_3
)
1359 EMIT1(0x5A); /* pop rdx */
1360 if (dst_reg
!= BPF_REG_0
)
1361 EMIT1(0x58); /* pop rax */
1365 case BPF_ALU
| BPF_MUL
| BPF_K
:
1366 case BPF_ALU64
| BPF_MUL
| BPF_K
:
1367 maybe_emit_mod(&prog
, dst_reg
, dst_reg
,
1368 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1371 /* imul dst_reg, dst_reg, imm8 */
1372 EMIT3(0x6B, add_2reg(0xC0, dst_reg
, dst_reg
),
1375 /* imul dst_reg, dst_reg, imm32 */
1377 add_2reg(0xC0, dst_reg
, dst_reg
),
1381 case BPF_ALU
| BPF_MUL
| BPF_X
:
1382 case BPF_ALU64
| BPF_MUL
| BPF_X
:
1383 maybe_emit_mod(&prog
, src_reg
, dst_reg
,
1384 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1386 /* imul dst_reg, src_reg */
1387 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg
, dst_reg
));
1391 case BPF_ALU
| BPF_LSH
| BPF_K
:
1392 case BPF_ALU
| BPF_RSH
| BPF_K
:
1393 case BPF_ALU
| BPF_ARSH
| BPF_K
:
1394 case BPF_ALU64
| BPF_LSH
| BPF_K
:
1395 case BPF_ALU64
| BPF_RSH
| BPF_K
:
1396 case BPF_ALU64
| BPF_ARSH
| BPF_K
:
1397 maybe_emit_1mod(&prog
, dst_reg
,
1398 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1400 b3
= simple_alu_opcodes
[BPF_OP(insn
->code
)];
1402 EMIT2(0xD1, add_1reg(b3
, dst_reg
));
1404 EMIT3(0xC1, add_1reg(b3
, dst_reg
), imm32
);
1407 case BPF_ALU
| BPF_LSH
| BPF_X
:
1408 case BPF_ALU
| BPF_RSH
| BPF_X
:
1409 case BPF_ALU
| BPF_ARSH
| BPF_X
:
1410 case BPF_ALU64
| BPF_LSH
| BPF_X
:
1411 case BPF_ALU64
| BPF_RSH
| BPF_X
:
1412 case BPF_ALU64
| BPF_ARSH
| BPF_X
:
1413 /* BMI2 shifts aren't better when shift count is already in rcx */
1414 if (boot_cpu_has(X86_FEATURE_BMI2
) && src_reg
!= BPF_REG_4
) {
1415 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1416 bool w
= (BPF_CLASS(insn
->code
) == BPF_ALU64
);
1419 switch (BPF_OP(insn
->code
)) {
1421 op
= 1; /* prefix 0x66 */
1424 op
= 3; /* prefix 0xf2 */
1427 op
= 2; /* prefix 0xf3 */
1431 emit_shiftx(&prog
, dst_reg
, src_reg
, w
, op
);
1436 if (src_reg
!= BPF_REG_4
) { /* common case */
1437 /* Check for bad case when dst_reg == rcx */
1438 if (dst_reg
== BPF_REG_4
) {
1439 /* mov r11, dst_reg */
1440 EMIT_mov(AUX_REG
, dst_reg
);
1443 EMIT1(0x51); /* push rcx */
1445 /* mov rcx, src_reg */
1446 EMIT_mov(BPF_REG_4
, src_reg
);
1449 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1450 maybe_emit_1mod(&prog
, dst_reg
,
1451 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1453 b3
= simple_alu_opcodes
[BPF_OP(insn
->code
)];
1454 EMIT2(0xD3, add_1reg(b3
, dst_reg
));
1456 if (src_reg
!= BPF_REG_4
) {
1457 if (insn
->dst_reg
== BPF_REG_4
)
1458 /* mov dst_reg, r11 */
1459 EMIT_mov(insn
->dst_reg
, AUX_REG
);
1461 EMIT1(0x59); /* pop rcx */
1466 case BPF_ALU
| BPF_END
| BPF_FROM_BE
:
1467 case BPF_ALU64
| BPF_END
| BPF_FROM_LE
:
1470 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1472 if (is_ereg(dst_reg
))
1474 EMIT3(0xC1, add_1reg(0xC8, dst_reg
), 8);
1476 /* Emit 'movzwl eax, ax' */
1477 if (is_ereg(dst_reg
))
1478 EMIT3(0x45, 0x0F, 0xB7);
1481 EMIT1(add_2reg(0xC0, dst_reg
, dst_reg
));
1484 /* Emit 'bswap eax' to swap lower 4 bytes */
1485 if (is_ereg(dst_reg
))
1489 EMIT1(add_1reg(0xC8, dst_reg
));
1492 /* Emit 'bswap rax' to swap 8 bytes */
1493 EMIT3(add_1mod(0x48, dst_reg
), 0x0F,
1494 add_1reg(0xC8, dst_reg
));
1499 case BPF_ALU
| BPF_END
| BPF_FROM_LE
:
1503 * Emit 'movzwl eax, ax' to zero extend 16-bit
1506 if (is_ereg(dst_reg
))
1507 EMIT3(0x45, 0x0F, 0xB7);
1510 EMIT1(add_2reg(0xC0, dst_reg
, dst_reg
));
1513 /* Emit 'mov eax, eax' to clear upper 32-bits */
1514 if (is_ereg(dst_reg
))
1516 EMIT2(0x89, add_2reg(0xC0, dst_reg
, dst_reg
));
1524 /* speculation barrier */
1525 case BPF_ST
| BPF_NOSPEC
:
1529 /* ST: *(u8*)(dst_reg + off) = imm */
1530 case BPF_ST
| BPF_MEM
| BPF_B
:
1531 if (is_ereg(dst_reg
))
1536 case BPF_ST
| BPF_MEM
| BPF_H
:
1537 if (is_ereg(dst_reg
))
1538 EMIT3(0x66, 0x41, 0xC7);
1542 case BPF_ST
| BPF_MEM
| BPF_W
:
1543 if (is_ereg(dst_reg
))
1548 case BPF_ST
| BPF_MEM
| BPF_DW
:
1549 EMIT2(add_1mod(0x48, dst_reg
), 0xC7);
1551 st
: if (is_imm8(insn
->off
))
1552 EMIT2(add_1reg(0x40, dst_reg
), insn
->off
);
1554 EMIT1_off32(add_1reg(0x80, dst_reg
), insn
->off
);
1556 EMIT(imm32
, bpf_size_to_x86_bytes(BPF_SIZE(insn
->code
)));
1559 /* STX: *(u8*)(dst_reg + off) = src_reg */
1560 case BPF_STX
| BPF_MEM
| BPF_B
:
1561 case BPF_STX
| BPF_MEM
| BPF_H
:
1562 case BPF_STX
| BPF_MEM
| BPF_W
:
1563 case BPF_STX
| BPF_MEM
| BPF_DW
:
1564 emit_stx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn
->off
);
1567 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1568 case BPF_LDX
| BPF_MEM
| BPF_B
:
1569 case BPF_LDX
| BPF_PROBE_MEM
| BPF_B
:
1570 case BPF_LDX
| BPF_MEM
| BPF_H
:
1571 case BPF_LDX
| BPF_PROBE_MEM
| BPF_H
:
1572 case BPF_LDX
| BPF_MEM
| BPF_W
:
1573 case BPF_LDX
| BPF_PROBE_MEM
| BPF_W
:
1574 case BPF_LDX
| BPF_MEM
| BPF_DW
:
1575 case BPF_LDX
| BPF_PROBE_MEM
| BPF_DW
:
1576 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
1577 case BPF_LDX
| BPF_MEMSX
| BPF_B
:
1578 case BPF_LDX
| BPF_MEMSX
| BPF_H
:
1579 case BPF_LDX
| BPF_MEMSX
| BPF_W
:
1580 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_B
:
1581 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_H
:
1582 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_W
:
1583 insn_off
= insn
->off
;
1585 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEM
||
1586 BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
) {
1587 /* Conservatively check that src_reg + insn->off is a kernel address:
1588 * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
1589 * src_reg is used as scratch for src_reg += insn->off and restored
1590 * after emit_ldx if necessary
1593 u64 limit
= TASK_SIZE_MAX
+ PAGE_SIZE
;
1596 /* At end of these emitted checks, insn->off will have been added
1597 * to src_reg, so no need to do relative load with insn->off offset
1601 /* movabsq r11, limit */
1602 EMIT2(add_1mod(0x48, AUX_REG
), add_1reg(0xB8, AUX_REG
));
1603 EMIT((u32
)limit
, 4);
1604 EMIT(limit
>> 32, 4);
1607 /* add src_reg, insn->off */
1608 maybe_emit_1mod(&prog
, src_reg
, true);
1609 EMIT2_off32(0x81, add_1reg(0xC0, src_reg
), insn
->off
);
1612 /* cmp src_reg, r11 */
1613 maybe_emit_mod(&prog
, src_reg
, AUX_REG
, true);
1614 EMIT2(0x39, add_2reg(0xC0, src_reg
, AUX_REG
));
1616 /* if unsigned '>=', goto load */
1620 /* xor dst_reg, dst_reg */
1621 emit_mov_imm32(&prog
, false, dst_reg
, 0);
1622 /* jmp byte_after_ldx */
1625 /* populate jmp_offset for JAE above to jump to start_of_ldx */
1626 start_of_ldx
= prog
;
1627 end_of_jmp
[-1] = start_of_ldx
- end_of_jmp
;
1629 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
||
1630 BPF_MODE(insn
->code
) == BPF_MEMSX
)
1631 emit_ldsx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn_off
);
1633 emit_ldx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn_off
);
1634 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEM
||
1635 BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
) {
1636 struct exception_table_entry
*ex
;
1637 u8
*_insn
= image
+ proglen
+ (start_of_ldx
- temp
);
1640 /* populate jmp_offset for JMP above */
1641 start_of_ldx
[-1] = prog
- start_of_ldx
;
1643 if (insn
->off
&& src_reg
!= dst_reg
) {
1644 /* sub src_reg, insn->off
1645 * Restore src_reg after "add src_reg, insn->off" in prev
1646 * if statement. But if src_reg == dst_reg, emit_ldx
1647 * above already clobbered src_reg, so no need to restore.
1648 * If add src_reg, insn->off was unnecessary, no need to
1651 maybe_emit_1mod(&prog
, src_reg
, true);
1652 EMIT2_off32(0x81, add_1reg(0xE8, src_reg
), insn
->off
);
1655 if (!bpf_prog
->aux
->extable
)
1658 if (excnt
>= bpf_prog
->aux
->num_exentries
) {
1659 pr_err("ex gen bug\n");
1662 ex
= &bpf_prog
->aux
->extable
[excnt
++];
1664 delta
= _insn
- (u8
*)&ex
->insn
;
1665 if (!is_simm32(delta
)) {
1666 pr_err("extable->insn doesn't fit into 32-bit\n");
1669 /* switch ex to rw buffer for writes */
1670 ex
= (void *)rw_image
+ ((void *)ex
- (void *)image
);
1674 ex
->data
= EX_TYPE_BPF
;
1676 if (dst_reg
> BPF_REG_9
) {
1677 pr_err("verifier error\n");
1681 * Compute size of x86 insn and its target dest x86 register.
1682 * ex_handler_bpf() will use lower 8 bits to adjust
1683 * pt_regs->ip to jump over this x86 instruction
1684 * and upper bits to figure out which pt_regs to zero out.
1685 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1686 * of 4 bytes will be ignored and rbx will be zero inited.
1688 ex
->fixup
= (prog
- start_of_ldx
) | (reg2pt_regs
[dst_reg
] << 8);
1692 case BPF_STX
| BPF_ATOMIC
| BPF_W
:
1693 case BPF_STX
| BPF_ATOMIC
| BPF_DW
:
1694 if (insn
->imm
== (BPF_AND
| BPF_FETCH
) ||
1695 insn
->imm
== (BPF_OR
| BPF_FETCH
) ||
1696 insn
->imm
== (BPF_XOR
| BPF_FETCH
)) {
1697 bool is64
= BPF_SIZE(insn
->code
) == BPF_DW
;
1698 u32 real_src_reg
= src_reg
;
1699 u32 real_dst_reg
= dst_reg
;
1703 * Can't be implemented with a single x86 insn.
1704 * Need to do a CMPXCHG loop.
1707 /* Will need RAX as a CMPXCHG operand so save R0 */
1708 emit_mov_reg(&prog
, true, BPF_REG_AX
, BPF_REG_0
);
1709 if (src_reg
== BPF_REG_0
)
1710 real_src_reg
= BPF_REG_AX
;
1711 if (dst_reg
== BPF_REG_0
)
1712 real_dst_reg
= BPF_REG_AX
;
1714 branch_target
= prog
;
1715 /* Load old value */
1716 emit_ldx(&prog
, BPF_SIZE(insn
->code
),
1717 BPF_REG_0
, real_dst_reg
, insn
->off
);
1719 * Perform the (commutative) operation locally,
1720 * put the result in the AUX_REG.
1722 emit_mov_reg(&prog
, is64
, AUX_REG
, BPF_REG_0
);
1723 maybe_emit_mod(&prog
, AUX_REG
, real_src_reg
, is64
);
1724 EMIT2(simple_alu_opcodes
[BPF_OP(insn
->imm
)],
1725 add_2reg(0xC0, AUX_REG
, real_src_reg
));
1726 /* Attempt to swap in new value */
1727 err
= emit_atomic(&prog
, BPF_CMPXCHG
,
1728 real_dst_reg
, AUX_REG
,
1730 BPF_SIZE(insn
->code
));
1734 * ZF tells us whether we won the race. If it's
1735 * cleared we need to try again.
1737 EMIT2(X86_JNE
, -(prog
- branch_target
) - 2);
1738 /* Return the pre-modification value */
1739 emit_mov_reg(&prog
, is64
, real_src_reg
, BPF_REG_0
);
1740 /* Restore R0 after clobbering RAX */
1741 emit_mov_reg(&prog
, true, BPF_REG_0
, BPF_REG_AX
);
1745 err
= emit_atomic(&prog
, insn
->imm
, dst_reg
, src_reg
,
1746 insn
->off
, BPF_SIZE(insn
->code
));
1752 case BPF_JMP
| BPF_CALL
: {
1755 func
= (u8
*) __bpf_call_base
+ imm32
;
1756 if (tail_call_reachable
) {
1757 RESTORE_TAIL_CALL_CNT(bpf_prog
->aux
->stack_depth
);
1760 offs
= 7 + x86_call_depth_emit_accounting(&prog
, func
);
1764 offs
= x86_call_depth_emit_accounting(&prog
, func
);
1766 if (emit_call(&prog
, func
, image
+ addrs
[i
- 1] + offs
))
1771 case BPF_JMP
| BPF_TAIL_CALL
:
1773 emit_bpf_tail_call_direct(bpf_prog
,
1774 &bpf_prog
->aux
->poke_tab
[imm32
- 1],
1775 &prog
, image
+ addrs
[i
- 1],
1777 bpf_prog
->aux
->stack_depth
,
1780 emit_bpf_tail_call_indirect(bpf_prog
,
1783 bpf_prog
->aux
->stack_depth
,
1784 image
+ addrs
[i
- 1],
1789 case BPF_JMP
| BPF_JEQ
| BPF_X
:
1790 case BPF_JMP
| BPF_JNE
| BPF_X
:
1791 case BPF_JMP
| BPF_JGT
| BPF_X
:
1792 case BPF_JMP
| BPF_JLT
| BPF_X
:
1793 case BPF_JMP
| BPF_JGE
| BPF_X
:
1794 case BPF_JMP
| BPF_JLE
| BPF_X
:
1795 case BPF_JMP
| BPF_JSGT
| BPF_X
:
1796 case BPF_JMP
| BPF_JSLT
| BPF_X
:
1797 case BPF_JMP
| BPF_JSGE
| BPF_X
:
1798 case BPF_JMP
| BPF_JSLE
| BPF_X
:
1799 case BPF_JMP32
| BPF_JEQ
| BPF_X
:
1800 case BPF_JMP32
| BPF_JNE
| BPF_X
:
1801 case BPF_JMP32
| BPF_JGT
| BPF_X
:
1802 case BPF_JMP32
| BPF_JLT
| BPF_X
:
1803 case BPF_JMP32
| BPF_JGE
| BPF_X
:
1804 case BPF_JMP32
| BPF_JLE
| BPF_X
:
1805 case BPF_JMP32
| BPF_JSGT
| BPF_X
:
1806 case BPF_JMP32
| BPF_JSLT
| BPF_X
:
1807 case BPF_JMP32
| BPF_JSGE
| BPF_X
:
1808 case BPF_JMP32
| BPF_JSLE
| BPF_X
:
1809 /* cmp dst_reg, src_reg */
1810 maybe_emit_mod(&prog
, dst_reg
, src_reg
,
1811 BPF_CLASS(insn
->code
) == BPF_JMP
);
1812 EMIT2(0x39, add_2reg(0xC0, dst_reg
, src_reg
));
1815 case BPF_JMP
| BPF_JSET
| BPF_X
:
1816 case BPF_JMP32
| BPF_JSET
| BPF_X
:
1817 /* test dst_reg, src_reg */
1818 maybe_emit_mod(&prog
, dst_reg
, src_reg
,
1819 BPF_CLASS(insn
->code
) == BPF_JMP
);
1820 EMIT2(0x85, add_2reg(0xC0, dst_reg
, src_reg
));
1823 case BPF_JMP
| BPF_JSET
| BPF_K
:
1824 case BPF_JMP32
| BPF_JSET
| BPF_K
:
1825 /* test dst_reg, imm32 */
1826 maybe_emit_1mod(&prog
, dst_reg
,
1827 BPF_CLASS(insn
->code
) == BPF_JMP
);
1828 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg
), imm32
);
1831 case BPF_JMP
| BPF_JEQ
| BPF_K
:
1832 case BPF_JMP
| BPF_JNE
| BPF_K
:
1833 case BPF_JMP
| BPF_JGT
| BPF_K
:
1834 case BPF_JMP
| BPF_JLT
| BPF_K
:
1835 case BPF_JMP
| BPF_JGE
| BPF_K
:
1836 case BPF_JMP
| BPF_JLE
| BPF_K
:
1837 case BPF_JMP
| BPF_JSGT
| BPF_K
:
1838 case BPF_JMP
| BPF_JSLT
| BPF_K
:
1839 case BPF_JMP
| BPF_JSGE
| BPF_K
:
1840 case BPF_JMP
| BPF_JSLE
| BPF_K
:
1841 case BPF_JMP32
| BPF_JEQ
| BPF_K
:
1842 case BPF_JMP32
| BPF_JNE
| BPF_K
:
1843 case BPF_JMP32
| BPF_JGT
| BPF_K
:
1844 case BPF_JMP32
| BPF_JLT
| BPF_K
:
1845 case BPF_JMP32
| BPF_JGE
| BPF_K
:
1846 case BPF_JMP32
| BPF_JLE
| BPF_K
:
1847 case BPF_JMP32
| BPF_JSGT
| BPF_K
:
1848 case BPF_JMP32
| BPF_JSLT
| BPF_K
:
1849 case BPF_JMP32
| BPF_JSGE
| BPF_K
:
1850 case BPF_JMP32
| BPF_JSLE
| BPF_K
:
1851 /* test dst_reg, dst_reg to save one extra byte */
1853 maybe_emit_mod(&prog
, dst_reg
, dst_reg
,
1854 BPF_CLASS(insn
->code
) == BPF_JMP
);
1855 EMIT2(0x85, add_2reg(0xC0, dst_reg
, dst_reg
));
1859 /* cmp dst_reg, imm8/32 */
1860 maybe_emit_1mod(&prog
, dst_reg
,
1861 BPF_CLASS(insn
->code
) == BPF_JMP
);
1864 EMIT3(0x83, add_1reg(0xF8, dst_reg
), imm32
);
1866 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg
), imm32
);
1868 emit_cond_jmp
: /* Convert BPF opcode to x86 */
1869 switch (BPF_OP(insn
->code
)) {
1878 /* GT is unsigned '>', JA in x86 */
1882 /* LT is unsigned '<', JB in x86 */
1886 /* GE is unsigned '>=', JAE in x86 */
1890 /* LE is unsigned '<=', JBE in x86 */
1894 /* Signed '>', GT in x86 */
1898 /* Signed '<', LT in x86 */
1902 /* Signed '>=', GE in x86 */
1906 /* Signed '<=', LE in x86 */
1909 default: /* to silence GCC warning */
1912 jmp_offset
= addrs
[i
+ insn
->off
] - addrs
[i
];
1913 if (is_imm8(jmp_offset
)) {
1915 /* To keep the jmp_offset valid, the extra bytes are
1916 * padded before the jump insn, so we subtract the
1917 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1919 * If the previous pass already emits an imm8
1920 * jmp_cond, then this BPF insn won't shrink, so
1923 * On the other hand, if the previous pass emits an
1924 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1925 * keep the image from shrinking further.
1927 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1928 * is 2 bytes, so the size difference is 4 bytes.
1930 nops
= INSN_SZ_DIFF
- 2;
1931 if (nops
!= 0 && nops
!= 4) {
1932 pr_err("unexpected jmp_cond padding: %d bytes\n",
1936 emit_nops(&prog
, nops
);
1938 EMIT2(jmp_cond
, jmp_offset
);
1939 } else if (is_simm32(jmp_offset
)) {
1940 EMIT2_off32(0x0F, jmp_cond
+ 0x10, jmp_offset
);
1942 pr_err("cond_jmp gen bug %llx\n", jmp_offset
);
1948 case BPF_JMP
| BPF_JA
:
1949 case BPF_JMP32
| BPF_JA
:
1950 if (BPF_CLASS(insn
->code
) == BPF_JMP
) {
1951 if (insn
->off
== -1)
1952 /* -1 jmp instructions will always jump
1953 * backwards two bytes. Explicitly handling
1954 * this case avoids wasting too many passes
1955 * when there are long sequences of replaced
1960 jmp_offset
= addrs
[i
+ insn
->off
] - addrs
[i
];
1962 if (insn
->imm
== -1)
1965 jmp_offset
= addrs
[i
+ insn
->imm
] - addrs
[i
];
1970 * If jmp_padding is enabled, the extra nops will
1971 * be inserted. Otherwise, optimize out nop jumps.
1974 /* There are 3 possible conditions.
1975 * (1) This BPF_JA is already optimized out in
1976 * the previous run, so there is no need
1977 * to pad any extra byte (0 byte).
1978 * (2) The previous pass emits an imm8 jmp,
1979 * so we pad 2 bytes to match the previous
1981 * (3) Similarly, the previous pass emits an
1982 * imm32 jmp, and 5 bytes is padded.
1984 nops
= INSN_SZ_DIFF
;
1985 if (nops
!= 0 && nops
!= 2 && nops
!= 5) {
1986 pr_err("unexpected nop jump padding: %d bytes\n",
1990 emit_nops(&prog
, nops
);
1995 if (is_imm8(jmp_offset
)) {
1997 /* To avoid breaking jmp_offset, the extra bytes
1998 * are padded before the actual jmp insn, so
1999 * 2 bytes is subtracted from INSN_SZ_DIFF.
2001 * If the previous pass already emits an imm8
2002 * jmp, there is nothing to pad (0 byte).
2004 * If it emits an imm32 jmp (5 bytes) previously
2005 * and now an imm8 jmp (2 bytes), then we pad
2006 * (5 - 2 = 3) bytes to stop the image from
2007 * shrinking further.
2009 nops
= INSN_SZ_DIFF
- 2;
2010 if (nops
!= 0 && nops
!= 3) {
2011 pr_err("unexpected jump padding: %d bytes\n",
2015 emit_nops(&prog
, INSN_SZ_DIFF
- 2);
2017 EMIT2(0xEB, jmp_offset
);
2018 } else if (is_simm32(jmp_offset
)) {
2019 EMIT1_off32(0xE9, jmp_offset
);
2021 pr_err("jmp gen bug %llx\n", jmp_offset
);
2026 case BPF_JMP
| BPF_EXIT
:
2028 jmp_offset
= ctx
->cleanup_addr
- addrs
[i
];
2032 /* Update cleanup_addr */
2033 ctx
->cleanup_addr
= proglen
;
2034 if (bpf_prog
->aux
->exception_boundary
) {
2035 pop_callee_regs(&prog
, all_callee_regs_used
);
2038 pop_callee_regs(&prog
, callee_regs_used
);
2040 EMIT1(0xC9); /* leave */
2041 emit_return(&prog
, image
+ addrs
[i
- 1] + (prog
- temp
));
2046 * By design x86-64 JIT should support all BPF instructions.
2047 * This error will be seen if new instruction was added
2048 * to the interpreter, but not to the JIT, or if there is
2051 pr_err("bpf_jit: unknown opcode %02x\n", insn
->code
);
2056 if (ilen
> BPF_MAX_INSN_SIZE
) {
2057 pr_err("bpf_jit: fatal insn size error\n");
2063 * When populating the image, assert that:
2065 * i) We do not write beyond the allocated space, and
2066 * ii) addrs[i] did not change from the prior run, in order
2067 * to validate assumptions made for computing branch
2070 if (unlikely(proglen
+ ilen
> oldproglen
||
2071 proglen
+ ilen
!= addrs
[i
])) {
2072 pr_err("bpf_jit: fatal error\n");
2075 memcpy(rw_image
+ proglen
, temp
, ilen
);
2082 if (image
&& excnt
!= bpf_prog
->aux
->num_exentries
) {
2083 pr_err("extable is not populated\n");
2089 static void clean_stack_garbage(const struct btf_func_model
*m
,
2090 u8
**pprog
, int nr_stack_slots
,
2096 /* Generally speaking, the compiler will pass the arguments
2097 * on-stack with "push" instruction, which will take 8-byte
2098 * on the stack. In this case, there won't be garbage values
2099 * while we copy the arguments from origin stack frame to current
2102 * However, sometimes the compiler will only allocate 4-byte on
2103 * the stack for the arguments. For now, this case will only
2104 * happen if there is only one argument on-stack and its size
2105 * not more than 4 byte. In this case, there will be garbage
2106 * values on the upper 4-byte where we store the argument on
2107 * current stack frame.
2109 * arguments on origin stack:
2111 * stack_arg_1(4-byte) xxx(4-byte)
2115 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2117 * and the xxx is the garbage values which we should clean here.
2119 if (nr_stack_slots
!= 1)
2122 /* the size of the last argument */
2123 arg_size
= m
->arg_size
[m
->nr_args
- 1];
2124 if (arg_size
<= 4) {
2125 off
= -(stack_size
- 4);
2127 /* mov DWORD PTR [rbp + off], 0 */
2129 EMIT2_off32(0xC7, 0x85, off
);
2131 EMIT3(0xC7, 0x45, off
);
2137 /* get the count of the regs that are used to pass arguments */
2138 static int get_nr_used_regs(const struct btf_func_model
*m
)
2140 int i
, arg_regs
, nr_used_regs
= 0;
2142 for (i
= 0; i
< min_t(int, m
->nr_args
, MAX_BPF_FUNC_ARGS
); i
++) {
2143 arg_regs
= (m
->arg_size
[i
] + 7) / 8;
2144 if (nr_used_regs
+ arg_regs
<= 6)
2145 nr_used_regs
+= arg_regs
;
2147 if (nr_used_regs
>= 6)
2151 return nr_used_regs
;
2154 static void save_args(const struct btf_func_model
*m
, u8
**prog
,
2155 int stack_size
, bool for_call_origin
)
2157 int arg_regs
, first_off
= 0, nr_regs
= 0, nr_stack_slots
= 0;
2160 /* Store function arguments to stack.
2161 * For a function that accepts two pointers the sequence will be:
2162 * mov QWORD PTR [rbp-0x10],rdi
2163 * mov QWORD PTR [rbp-0x8],rsi
2165 for (i
= 0; i
< min_t(int, m
->nr_args
, MAX_BPF_FUNC_ARGS
); i
++) {
2166 arg_regs
= (m
->arg_size
[i
] + 7) / 8;
2168 /* According to the research of Yonghong, struct members
2169 * should be all in register or all on the stack.
2170 * Meanwhile, the compiler will pass the argument on regs
2171 * if the remaining regs can hold the argument.
2173 * Disorder of the args can happen. For example:
2175 * struct foo_struct {
2179 * int foo(char, char, char, char, char, struct foo_struct,
2182 * the arg1-5,arg7 will be passed by regs, and arg6 will
2185 if (nr_regs
+ arg_regs
> 6) {
2186 /* copy function arguments from origin stack frame
2187 * into current stack frame.
2189 * The starting address of the arguments on-stack
2191 * rbp + 8(push rbp) +
2192 * 8(return addr of origin call) +
2193 * 8(return addr of the caller)
2194 * which means: rbp + 24
2196 for (j
= 0; j
< arg_regs
; j
++) {
2197 emit_ldx(prog
, BPF_DW
, BPF_REG_0
, BPF_REG_FP
,
2198 nr_stack_slots
* 8 + 0x18);
2199 emit_stx(prog
, BPF_DW
, BPF_REG_FP
, BPF_REG_0
,
2202 if (!nr_stack_slots
)
2203 first_off
= stack_size
;
2208 /* Only copy the arguments on-stack to current
2209 * 'stack_size' and ignore the regs, used to
2210 * prepare the arguments on-stack for origin call.
2212 if (for_call_origin
) {
2213 nr_regs
+= arg_regs
;
2217 /* copy the arguments from regs into stack */
2218 for (j
= 0; j
< arg_regs
; j
++) {
2219 emit_stx(prog
, BPF_DW
, BPF_REG_FP
,
2220 nr_regs
== 5 ? X86_REG_R9
: BPF_REG_1
+ nr_regs
,
2228 clean_stack_garbage(m
, prog
, nr_stack_slots
, first_off
);
2231 static void restore_regs(const struct btf_func_model
*m
, u8
**prog
,
2234 int i
, j
, arg_regs
, nr_regs
= 0;
2236 /* Restore function arguments from stack.
2237 * For a function that accepts two pointers the sequence will be:
2238 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2239 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2241 * The logic here is similar to what we do in save_args()
2243 for (i
= 0; i
< min_t(int, m
->nr_args
, MAX_BPF_FUNC_ARGS
); i
++) {
2244 arg_regs
= (m
->arg_size
[i
] + 7) / 8;
2245 if (nr_regs
+ arg_regs
<= 6) {
2246 for (j
= 0; j
< arg_regs
; j
++) {
2247 emit_ldx(prog
, BPF_DW
,
2248 nr_regs
== 5 ? X86_REG_R9
: BPF_REG_1
+ nr_regs
,
2255 stack_size
-= 8 * arg_regs
;
2263 static int invoke_bpf_prog(const struct btf_func_model
*m
, u8
**pprog
,
2264 struct bpf_tramp_link
*l
, int stack_size
,
2265 int run_ctx_off
, bool save_ret
,
2266 void *image
, void *rw_image
)
2270 int ctx_cookie_off
= offsetof(struct bpf_tramp_run_ctx
, bpf_cookie
);
2271 struct bpf_prog
*p
= l
->link
.prog
;
2272 u64 cookie
= l
->cookie
;
2274 /* mov rdi, cookie */
2275 emit_mov_imm64(&prog
, BPF_REG_1
, (long) cookie
>> 32, (u32
) (long) cookie
);
2277 /* Prepare struct bpf_tramp_run_ctx.
2279 * bpf_tramp_run_ctx is already preserved by
2280 * arch_prepare_bpf_trampoline().
2282 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2284 emit_stx(&prog
, BPF_DW
, BPF_REG_FP
, BPF_REG_1
, -run_ctx_off
+ ctx_cookie_off
);
2286 /* arg1: mov rdi, progs[i] */
2287 emit_mov_imm64(&prog
, BPF_REG_1
, (long) p
>> 32, (u32
) (long) p
);
2288 /* arg2: lea rsi, [rbp - ctx_cookie_off] */
2289 if (!is_imm8(-run_ctx_off
))
2290 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off
);
2292 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off
);
2294 if (emit_rsb_call(&prog
, bpf_trampoline_enter(p
), image
+ (prog
- (u8
*)rw_image
)))
2296 /* remember prog start time returned by __bpf_prog_enter */
2297 emit_mov_reg(&prog
, true, BPF_REG_6
, BPF_REG_0
);
2299 /* if (__bpf_prog_enter*(prog) == 0)
2300 * goto skip_exec_of_prog;
2302 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
2303 /* emit 2 nops that will be replaced with JE insn */
2305 emit_nops(&prog
, 2);
2307 /* arg1: lea rdi, [rbp - stack_size] */
2308 if (!is_imm8(-stack_size
))
2309 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size
);
2311 EMIT4(0x48, 0x8D, 0x7D, -stack_size
);
2312 /* arg2: progs[i]->insnsi for interpreter */
2314 emit_mov_imm64(&prog
, BPF_REG_2
,
2315 (long) p
->insnsi
>> 32,
2316 (u32
) (long) p
->insnsi
);
2317 /* call JITed bpf program or interpreter */
2318 if (emit_rsb_call(&prog
, p
->bpf_func
, image
+ (prog
- (u8
*)rw_image
)))
2322 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2323 * of the previous call which is then passed on the stack to
2324 * the next BPF program.
2326 * BPF_TRAMP_FENTRY trampoline may need to return the return
2327 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2330 emit_stx(&prog
, BPF_DW
, BPF_REG_FP
, BPF_REG_0
, -8);
2332 /* replace 2 nops with JE insn, since jmp target is known */
2333 jmp_insn
[0] = X86_JE
;
2334 jmp_insn
[1] = prog
- jmp_insn
- 2;
2336 /* arg1: mov rdi, progs[i] */
2337 emit_mov_imm64(&prog
, BPF_REG_1
, (long) p
>> 32, (u32
) (long) p
);
2338 /* arg2: mov rsi, rbx <- start time in nsec */
2339 emit_mov_reg(&prog
, true, BPF_REG_2
, BPF_REG_6
);
2340 /* arg3: lea rdx, [rbp - run_ctx_off] */
2341 if (!is_imm8(-run_ctx_off
))
2342 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off
);
2344 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off
);
2345 if (emit_rsb_call(&prog
, bpf_trampoline_exit(p
), image
+ (prog
- (u8
*)rw_image
)))
2352 static void emit_align(u8
**pprog
, u32 align
)
2354 u8
*target
, *prog
= *pprog
;
2356 target
= PTR_ALIGN(prog
, align
);
2358 emit_nops(&prog
, target
- prog
);
2363 static int emit_cond_near_jump(u8
**pprog
, void *func
, void *ip
, u8 jmp_cond
)
2368 offset
= func
- (ip
+ 2 + 4);
2369 if (!is_simm32(offset
)) {
2370 pr_err("Target %p is out of range\n", func
);
2373 EMIT2_off32(0x0F, jmp_cond
+ 0x10, offset
);
2378 static int invoke_bpf(const struct btf_func_model
*m
, u8
**pprog
,
2379 struct bpf_tramp_links
*tl
, int stack_size
,
2380 int run_ctx_off
, bool save_ret
,
2381 void *image
, void *rw_image
)
2386 for (i
= 0; i
< tl
->nr_links
; i
++) {
2387 if (invoke_bpf_prog(m
, &prog
, tl
->links
[i
], stack_size
,
2388 run_ctx_off
, save_ret
, image
, rw_image
))
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches,
			      void *image, void *rw_image)
{
	int i;
	u8 *prog = *pprog;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
				    image, rw_image))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
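/*
 * A minimal sketch of the reserve-then-patch pattern used above (illustrative
 * only; this helper is not part of the JIT).  Six nop bytes are emitted as a
 * placeholder while the jump target is still unknown; once do_fexit is
 * reached, __arch_prepare_bpf_trampoline() rewrites the placeholder into a
 * "jne do_fexit" with emit_cond_near_jump(), rebasing the recorded rw_image
 * position onto the final image address.
 */
static __maybe_unused int patch_fmod_ret_branch_sketch(u8 *placeholder, void *do_fexit,
							void *image, u8 *rw_image)
{
	/* Overwrite the 2 + 4 nop bytes recorded in branches[i] with a near
	 * conditional jump to the (now known) do_fexit address.
	 */
	return emit_cond_near_jump(&placeholder, do_fexit,
				   image + (placeholder - rw_image), X86_JNE);
}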
/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret                             // return to original eth_type_trans
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
					 void *rw_image_end, void *image,
					 const struct btf_func_model *m, u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	int i, ret, nr_regs = m->nr_args, stack_size = 0;
	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	void *orig_call = func_addr;
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/*
	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is
	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
	 * because @func_addr is not the address of a patched kernel function
	 * in that case.
	 */
	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
	/* extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
	}
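	/* Worked example (illustrative): a 16-byte struct passed by value
	 * occupies two argument registers, so it contributes
	 * (16 + 7) / 8 - 1 = 1 extra slot on top of the one already counted
	 * in m->nr_args.
	 */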
	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
	 * are passed through regs, the rest are passed on the stack.
	 */
	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -ENOTSUPP;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8         [ return address  ]
	 * RBP + 0         [ RBP             ]
	 *
	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                 [ reg_argN        ]  always
	 *                 [ ...             ]
	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
	 *
	 * RBP - nregs_off [ regs count      ]  always
	 *
	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
	 *
	 * RBP - rbx_off   [ rbx value       ]  always
	 *
	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
	 *
	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
	 *                     [ ...        ]
	 *                     [ stack_arg2 ]
	 * RBP - arg_stack_off [ stack_arg1 ]
	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
	 */

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	stack_size += nr_regs * 8;
	regs_off = stack_size;

	/* regs count */
	stack_size += 8;
	nregs_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += 8;
	rbx_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;

	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space that used to pass arguments on-stack */
		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* make sure the stack pointer is 16-byte aligned if we need
		 * to pass arguments on the stack, which means
		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. The following code depends on
		 * stack_size already being 8-byte aligned (a worked example
		 * follows this block).
		 */
		stack_size += (stack_size % 16) ? 0 : 8;
	}

	arg_stack_off = stack_size;
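	/* Worked example of the alignment rule above (illustrative numbers):
	 * with stack_size = 32 the frame at the call to the original function
	 * would hold 32 + 8 (rbp) + 8 (rip) + 8 (origin rip) = 56 bytes, which
	 * is not 16-byte aligned, so 8 bytes of padding are added; with
	 * stack_size = 40 the total is already 64 and no padding is needed.
	 */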
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = rw_image;

	if (flags & BPF_TRAMP_F_INDIRECT) {
		/*
		 * Indirect call for bpf_struct_ops
		 */
		emit_cfi(&prog, cfi_get_func_hash(func_addr));
	} else {
		/*
		 * Direct-call fentry stub, as such it needs accounting for the
		 * __fentry__ call.
		 */
		x86_call_depth_emit_accounting(&prog, NULL);
	}
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	if (!is_imm8(stack_size)) {
		/* sub rsp, stack_size */
		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
	} else {
		/* sub rsp, stack_size */
		EMIT4(0x48, 0x83, 0xEC, stack_size);
	}
	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		EMIT1(0x50);		 /* push rax */
	/* mov QWORD PTR [rbp - rbx_off], rbx */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
	/* Store number of argument registers of the traced function:
	 *   mov rax, nr_regs
	 *   mov QWORD PTR [rbp - nregs_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 *   movabsq rax, func_addr
		 *   mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_args(m, &prog, regs_off, false);
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_enter,
				  image + (prog - (u8 *)rw_image))) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_links) {
		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
			return -EINVAL;
	}

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       run_ctx_off, branches, image, rw_image)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, regs_off);
		save_args(m, &prog, arg_stack_off, true);

		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
			/* Before calling the original function, restore the
			 * tail_call_cnt from stack to rax.
			 */
			RESTORE_TAIL_CALL_CNT(stack_size);
		}

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd3); /* call *rbx */
		} else {
			/* call original function */
			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
				ret = -EINVAL;
				goto cleanup;
			}
		}
		/* remember return value in a stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = image + (prog - (u8 *)rw_image);
		emit_nops(&prog, X86_PATCH_SIZE);
	}
	if (fmod_ret->nr_links) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_links; i++) {
			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
		}
	}

	if (fexit->nr_links) {
		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
			       false, image, rw_image)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, regs_off);
	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = image + (prog - (u8 *)rw_image);
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
			ret = -EINVAL;
			goto cleanup;
		}
	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
		/* Before running the original function, restore the
		 * tail_call_cnt from stack to rax.
		 */
		RESTORE_TAIL_CALL_CNT(stack_size);
	}

	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	}
	emit_return(&prog, image + (prog - (u8 *)rw_image));
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;

cleanup:
	kfree(branches);
	return ret;
}
void *arch_alloc_bpf_trampoline(unsigned int size)
{
	return bpf_prog_pack_alloc(size, jit_fill_hole);
}

void arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_prog_pack_free(image, size);
}

void arch_protect_bpf_trampoline(void *image, unsigned int size)
{
}

void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
{
}
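/*
 * Nothing to do in the two helpers above: trampoline images come from
 * bpf_prog_pack (see arch_alloc_bpf_trampoline()), whose pages are already
 * mapped read-only and executable, and all updates go through text poking
 * (bpf_arch_text_copy()).
 */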
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	void *rw_image, *tmp;
	int ret;
	u32 size = image_end - image;

	/* rw_image doesn't need to be in module memory range, so we can
	 * use kvmalloc.
	 */
	rw_image = kvmalloc(size, GFP_KERNEL);
	if (!rw_image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
					    flags, tlinks, func_addr);
	if (ret > 0) {
		/* Copy the generated trampoline back into the read-only image. */
		tmp = bpf_arch_text_copy(image, rw_image, size);
		if (IS_ERR(tmp))
			ret = PTR_ERR(tmp);
	}

	kvfree(rw_image);
	return ret;
}
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct bpf_tramp_image im;
	void *image;
	int ret;

	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
	 * This will NOT cause fragmentation in direct map, as we do not
	 * call set_memory_*() on this buffer.
	 *
	 * We cannot use kvmalloc here, because we need image to be in
	 * module memory range.
	 */
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
					    m, flags, tlinks, func_addr);
	bpf_jit_free_exec(image);
	return ret;
}
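/*
 * The positive value returned above is the number of bytes the real
 * trampoline will occupy (including the BPF_INSN_SAFETY slack added at the
 * end of __arch_prepare_bpf_trampoline()), obtained by doing a full dry-run
 * generation into the temporary page.
 */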
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], image + (prog - buf),
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs, image, buf);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs, image, buf);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
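/*
 * Illustrative sketch (not emitted or called anywhere): the control flow that
 * the generated dispatcher encodes, i.e. a binary search over the sorted
 * progs[] addresses held in rdx, ending in either a direct jump to the
 * matched program or an indirect jump through rdx.
 */
static __maybe_unused void bpf_dispatcher_shape_sketch(s64 *progs, int a, int b, s64 rdx)
{
	while (a != b) {
		int pivot = (b - a) / 2;

		if (rdx > progs[a + pivot])	/* "jg upper_part" */
			a = a + pivot + 1;	/* descend into upper range */
		else
			b = a + pivot;		/* fall through to lower range */
	}
	/* leaf: "je progs[a]" when rdx == progs[a], otherwise "jmp rdx" */
}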
static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
	u8 *prog = buf;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}
struct x64_jit_data {
	struct bpf_binary_header *rw_header;
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *rw_header = NULL;
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *rw_image = NULL;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		rw_header = jit_data->rw_header;
		rw_image = (void *)rw_header + ((void *)image - (void *)header);
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
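	/* For example, a forward jump emitted in its near (rel32) form in an
	 * early pass may shrink to its short (rel8) form once offsets settle.
	 * From PADDING_PASSES onwards, do_jit() pads shrinking instructions
	 * with nops instead, so proglen is guaranteed to converge and the
	 * final pass can emit into the allocated image at stable offsets.
	 */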
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header) {
				bpf_arch_text_copy(&header->size, &rw_header->size,
						   sizeof(rw_header->size));
				bpf_jit_binary_pack_free(header, rw_header);
			}
			/* Fall back to interpreter mode */
			prog = orig_prog;
			if (extra_pass) {
				prog->bpf_func = NULL;
				prog->jited = 0;
				prog->jited_len = 0;
			}
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
							   &image, align, &rw_header, &rw_image,
							   jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
	if (image) {
		if (!prog->is_func || extra_pass) {
			/*
			 * bpf_jit_binary_pack_finalize fails in two scenarios:
			 *   1) header is not pointing to proper module memory;
			 *   2) the arch doesn't support bpf_arch_text_copy().
			 *
			 * Both cases are serious bugs and justify WARN_ON.
			 */
			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
				/* header has been freed */
				header = NULL;
				goto out_image;
			}

			bpf_tail_call_direct_fixup(prog);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
			jit_data->rw_header = rw_header;
		}
		/*
		 * ctx.prog_offset is used when CFI preambles put code *before*
		 * the function. See emit_cfi(). For FineIBT specifically this code
		 * can also be executed and bpf_prog_kallsyms_add() will
		 * generate an additional symbol to cover this, hence also
		 * decrement proglen.
		 */
		prog->bpf_func = (void *)image + cfi_get_offset();
		prog->jited = 1;
		prog->jited_len = proglen - cfi_get_offset();
	} else {
		prog = orig_prog;
	}
	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	if (text_poke_copy(dst, src, len) == NULL)
		return ERR_PTR(-EINVAL);
	return dst;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}
void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct x64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->header,
						     jit_data->rw_header);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}
bool bpf_jit_supports_exceptions(void)
{
	/* We unwind through both kernel frames (starting from within bpf_throw
	 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
	 * to walk kernel frames and reach BPF frames in the stack trace.
	 */
	return IS_ENABLED(CONFIG_UNWINDER_ORC);
}
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
#if defined(CONFIG_UNWINDER_ORC)
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
			break;
	}
	return;
#endif
	WARN(1, "verification of programs using bpf_throw should have failed\n");
}
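/*
 * Example shape of a consume_fn callback (illustrative; the name and cookie
 * layout are hypothetical): returning false stops the walk early.
 */
static __maybe_unused bool example_consume_frame(void *cookie, u64 ip, u64 sp, u64 bp)
{
	int *frames_left = cookie;

	return (*frames_left)-- > 0;
}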
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
			       struct bpf_prog *new, struct bpf_prog *old)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	int ret;

	old_bypass_addr = old ? NULL : poke->bypass_addr;
	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

	/*
	 * On program loading or teardown, the program's kallsym entry
	 * might not be in place, so we use __bpf_arch_text_poke to skip
	 * the kallsyms check.
	 */
	if (new) {
		ret = __bpf_arch_text_poke(poke->tailcall_target,
					   BPF_MOD_JUMP,
					   old_addr, new_addr);
		BUG_ON(ret < 0);
		if (!old) {
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   poke->bypass_addr,
						   NULL);
			BUG_ON(ret < 0);
		}
	} else {
		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
					   BPF_MOD_JUMP,
					   old_bypass_addr,
					   poke->bypass_addr);
		BUG_ON(ret < 0);
		/* let other CPUs finish the execution of the program
		 * so that it will not be possible to expose them
		 * to invalid nop, stack unwind, nop state
		 */
		if (!ret)
			synchronize_rcu();
		ret = __bpf_arch_text_poke(poke->tailcall_target,
					   BPF_MOD_JUMP,
					   old_addr, NULL);
		BUG_ON(ret < 0);
	}
}