/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ /*
+  * Arena address-space cast (cast_user): rewrite an arena kernel
+  * pointer in src into its user-space form by installing the upper
+  * 32 bits of ctx->user_vm_start, while preserving NULL (a zero
+  * src must stay zero in dst).
+  */
+ if (insn_is_cast_user(insn)) {
+ /* t1 = lower 32 bits of src, zero-extended to 64 bits. */
+ move_reg(ctx, t1, src);
+ emit_zext_32(ctx, t1, true);
+ /* dst = user_vm_start with its low 32 bits cleared. */
+ move_imm(ctx, dst, (ctx->user_vm_start >> 32) << 32, false);
+ /*
+  * If src was NULL (t1 == 0), branch over the OR so that the
+  * final move leaves dst == 0; offset 1 skips exactly the one
+  * following emitted instruction.
+  */
+ emit_insn(ctx, beq, t1, LOONGARCH_GPR_ZERO, 1);
+ /* Non-NULL: combine the user VM high half with the low offset. */
+ emit_insn(ctx, or, t1, dst, t1);
+ move_reg(ctx, dst, t1);
+ break;
+ }
switch (off) {
case 0:
move_reg(ctx, dst, src);
/* Start from a clean JIT context for this (re)compilation pass. */
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
+ /*
+  * Cache the arena's user VM base as well; build_insn() needs it to
+  * lower cast_user (see the insn_is_cast_user() path).
+  */
+ ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
/* Per-instruction offset table; prog->len + 1 to record the end offset. */
ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
if (ctx.offset == NULL) {
/*
 * NOTE(review): returning true on allocation failure looks wrong for
 * a JIT compile entry point, which normally returns a struct bpf_prog *
 * (upstream JITs restore orig_prog and goto a cleanup label here).
 * Verify against the enclosing function's signature.
 */
return true;
}
+/* Indicate the JIT backend supports bpf arena maps. */
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{