bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
bool bpf_jit_supports_timed_may_goto(void);
+bool bpf_jit_supports_fsession(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
u64 arch_bpf_timed_may_goto(void);
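The neighboring bpf_jit_supports_*() hooks declared here get __weak defaults in kernel/bpf/core.c that return false, and each JIT overrides the ones it implements. A minimal sketch, assuming the new fsession hook follows that same pattern (the x86-64 override is an assumption, suggested by the selftests below being runtime-gated rather than compile-time x86-only):

	/* kernel/bpf/core.c: weak default, fsession is unsupported
	 * unless the arch JIT overrides this */
	bool __weak bpf_jit_supports_fsession(void)
	{
		return false;
	}

	/* arch/x86/net/bpf_jit_comp.c: sketch of a JIT opting in */
	bool bpf_jit_supports_fsession(void)
	{
		return true;
	}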
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_TRACE_FSESSION:
+		if (prog->expected_attach_type == BPF_TRACE_FSESSION &&
+		    !bpf_jit_supports_fsession()) {
+			bpf_log(log, "JIT does not support fsession\n");
+			return -EOPNOTSUPP;
+		}
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
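With this check in the verifier's attach-target path, loading an fsession program on a JIT without support fails with -EOPNOTSUPP, while opening the skeleton still succeeds; the selftest changes below lean on exactly that split. A minimal sketch of a userspace probe built on the same behavior (fsession_supported() is a hypothetical helper, not part of the patch):

	#include "fsession_test.skel.h"

	/* Hypothetical probe: open and load the skeleton once, and
	 * report whether load failed specifically with -EOPNOTSUPP,
	 * i.e. the kernel knows fsession but this JIT does not. */
	static bool fsession_supported(void)
	{
		struct fsession_test *skel;
		int err;

		skel = fsession_test__open();
		if (!skel)
			return false;
		err = fsession_test__load(skel);
		fsession_test__destroy(skel);
		return err != -EOPNOTSUPP;
	}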
	struct fsession_test *skel = NULL;
	int err;

-	skel = fsession_test__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "fsession_test__open_and_load"))
+	skel = fsession_test__open();
+	if (!ASSERT_OK_PTR(skel, "fsession_test__open"))
+		return;
+
+	err = fsession_test__load(skel);
+	if (err == -EOPNOTSUPP) {
+		test__skip();
+		goto cleanup;
+	}
+	if (!ASSERT_OK(err, "fsession_test__load"))
		goto cleanup;

	err = fsession_test__attach(skel);
	struct fsession_test *skel = NULL;
	int err;

-	skel = fsession_test__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "fsession_test__open_and_load"))
+	skel = fsession_test__open();
+	if (!ASSERT_OK_PTR(skel, "fsession_test__open"))
+		return;
+
+	err = fsession_test__load(skel);
+	if (err == -EOPNOTSUPP) {
+		test__skip();
+		goto cleanup;
+	}
+	if (!ASSERT_OK(err, "fsession_test__load"))
		goto cleanup;
	/* first attach */

	bpf_program__set_autoload(skel->progs.test6, false);
	err = fsession_test__load(skel);
+	if (err == -EOPNOTSUPP) {
+		test__skip();
+		goto cleanup;
+	}
	if (!ASSERT_OK(err, "fsession_test__load"))
		goto cleanup;
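The open/load/skip sequence now repeats across the subtests; a hedged refactor sketch (not part of the patch) that hoists it into a shared helper, with fsession_open_and_load_checked() as a hypothetical name:

	/* Hypothetical helper: returns a loaded skeleton, or NULL after
	 * skipping (JIT lacks fsession) or after a failed assert. */
	static struct fsession_test *fsession_open_and_load_checked(void)
	{
		struct fsession_test *skel;
		int err;

		skel = fsession_test__open();
		if (!ASSERT_OK_PTR(skel, "fsession_test__open"))
			return NULL;

		err = fsession_test__load(skel);
		if (err == -EOPNOTSUPP) {
			test__skip();
			goto cleanup;
		}
		if (!ASSERT_OK(err, "fsession_test__load"))
			goto cleanup;
		return skel;

	cleanup:
		fsession_test__destroy(skel);
		return NULL;
	}

The subtest that disables autoload on test6 before loading would still need to call fsession_test__load() itself, so the helper only covers the plain open/load cases.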
void test_fsession_test(void)
{
-#if !defined(__x86_64__)
-	test__skip();
-	return;
-#endif
	if (test__start_subtest("fsession_test"))
		test_fsession_basic();
	if (test__start_subtest("fsession_reattach"))