/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"
int bpf_jit_enable __read_mostly;
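/*
 * Unused space in the JIT image is filled with trap instructions so that a
 * stray branch into it traps instead of executing leftover bytes; size is
 * in bytes, hence the division by 4 to get 32-bit instruction slots.
 */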
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}
static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 8*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}
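/*
 * A sketch of where the two scratch slots end up, per the helpers above
 * (offsets relative to r1):
 *
 *	with our own stack frame:	the local tmp var sits at
 *					STACK_FRAME_MIN_SIZE + MAX_BPF_STACK,
 *					with tail_call_cnt 8 bytes above it
 *	without a stack frame:		both slots sit in the redzone, just
 *					below the nv gpr save area (see the
 *					diagram above)
 */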
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
							- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
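/*
 * Worked example for the offset calculation above: r31 lands 8 bytes below
 * the frame top (8 * (32 - 31)), r30 lands 16 bytes below, and so on down
 * to BPF_PPC_NVR_MIN; without our own frame, the frame top is taken as 0
 * and the same (now negative) offsets index into the redzone.
 */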
static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
	/*
	 * Load skb->len and skb->data_len
	 * r3 points to skb
	 */
	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
	/* header_len = len - data_len */
	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

	/* skb->data pointer */
	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}
static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
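	/*
	 * i.e. the two instructions emitted above (either the tail_call_cnt
	 * initialization or the two NOPs) are exactly 8 bytes, which is what
	 * a tail call branches past (see bpf_jit_emit_tail_call()).
	 */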
	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/*
	 * Save additional non-volatile regs if we cache skb
	 * Also, setup skb data
	 */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
		bpf_jit_emit_skb_loads(image, ctx);
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Restore non-volatile registers used for skb cache */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
	}

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);
	PPC_BLR();
}
static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to LR */
	PPC_MTLR(b2p[TMP_REG_1]);
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	PPC_MTLR(12);
#endif
	PPC_BLRL();
}
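/*
 * For reference, an ELF ABI v1 function descriptor is laid out roughly as
 * three doublewords (a sketch, not something this file defines):
 *
 *	offset  0: entry point address	(loaded into LR above)
 *	offset  8: TOC base		(loaded into r2 above)
 *	offset 16: environment pointer	(unused here)
 */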
static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array,
			offsetof(struct bpf_array, map.max_entries));
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
}
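/*
 * Note: branching BPF_TAILCALL_PROLOGUE_SIZE bytes past bpf_func means the
 * callee's tail_call_cnt initialization is skipped, so the counter in the
 * redzone persists across chained tail calls; that is what bounds a chain
 * at MAX_TAIL_CALL_CNT.
 */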
/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];
	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;
		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			PPC_CMPWI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
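		/*
		 * Note: BPF_MOD above is open-coded as dst - (dst / src) * src,
		 * and the 64-bit and immediate variants below do the same;
		 * presumably because a hardware modulo instruction is not
		 * assumed to be available (that rationale is an assumption,
		 * not something spelled out here).
		 */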
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			PPC_CMPDI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;
		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			goto bpf_alu32_trunc;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;
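		/*
		 * Worked example for the 32-bit swap above, with
		 * dst = 0x11223344:
		 *
		 *	rlwinm	tmp, dst, 8, 0, 31	-> tmp = 0x22334411
		 *	rlwimi	tmp, dst, 24, 0, 7	-> tmp = 0x44334411
		 *	rlwimi	tmp, dst, 24, 16, 23	-> tmp = 0x44332211
		 *
		 * The 64-bit case instead bounces the value through the stack
		 * slot so a single byte-reversed load (ldbrx) does the work.
		 */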
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;
		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
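		/*
		 * In effect, each XADD above is (a sketch):
		 *
		 *	tmp = load-and-reserve(ea);
		 *	tmp += src;
		 *	if (!store-conditional(ea, tmp))
		 *		retry once, then return 0 on failure;
		 *
		 * i.e. the reservation is retried a single time rather than
		 * looped on until it succeeds.
		 */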
		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
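		/*
		 * (The second bpf_insn slot only carries the upper 32 bits of
		 * the immediate; addrs[] still gets an entry for it, pointing
		 * at the same generated code, before i is advanced past it.)
		 */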
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			/* Save skb pointer if we need to re-cache skb data */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func))
				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);

			/* refresh skb cache */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func)) {
				/* reload skb pointer to r3 */
				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
				bpf_jit_emit_skb_loads(image, ctx);
			}
			break;
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		/*
		 * Loads from packet header/data
		 * Assume 32-bit input value in imm and X (src_reg)
		 */

		/* Absolute loads */
		case BPF_LD | BPF_W | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
			goto common_load_abs;
		case BPF_LD | BPF_H | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
			goto common_load_abs;
		case BPF_LD | BPF_B | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
			/*
			 * Load from [imm]
			 * Load into r4, which can just be passed onto
			 *  skb load helpers as the second parameter
			 */
			PPC_LI32(4, imm);
			goto common_load;
		/* Indirect loads */
		case BPF_LD | BPF_W | BPF_IND:
			func = (u8 *)sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = (u8 *)sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = (u8 *)sk_load_byte;
common_load_ind:
			/*
			 * Load from [src_reg + imm]
			 * Treat src_reg as a 32-bit value
			 */
			PPC_EXTSW(4, src_reg);
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(4, 4, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(4, 4, b2p[TMP_REG_1]);
				}
			}
common_load:
			ctx->seen |= SEEN_SKB;
			ctx->seen |= SEEN_FUNC;
			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in BPF_REG_0
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;
		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;

	if (!bpf_jit_enable)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;
	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	flen = fp->len;
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out;
	}

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
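	/*
	 * Every generated PowerPC instruction is 4 bytes, so idx (the count
	 * of instructions "emitted" against a NULL image) times 4 is exactly
	 * the byte size needed for the real passes below.
	 */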
	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out;
	}
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);
#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
	kfree(addrs);

	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}
/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}