]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
ftrace,bpf: Remove FTRACE_OPS_FL_JMP ftrace_ops flag
authorJiri Olsa <jolsa@kernel.org>
Tue, 30 Dec 2025 14:50:02 +0000 (15:50 +0100)
committerAndrii Nakryiko <andrii@kernel.org>
Wed, 28 Jan 2026 19:44:35 +0000 (11:44 -0800)
At the moment we allow the jmp attach only for ftrace_ops that
has FTRACE_OPS_FL_JMP set. This conflicts with following changes
where we use single ftrace_ops object for all direct call sites,
so all could be attached via just call or jmp.

We already limit the jmp attach support with config option and bit
(LSB) set on the trampoline address. It turns out that's actually
enough to limit the jmp attach per architecture and only to chosen
addresses (those with the LSB bit set).

Each user of register_ftrace_direct or modify_ftrace_direct can set
the trampoline bit (LSB) to indicate it has to be attached by jmp.

The bpf trampoline generation code uses trampoline flags to generate
jmp-attach specific code and ftrace inner code uses the trampoline
bit (LSB) to handle return from jmp attachment, so there's no harm
to remove the FTRACE_OPS_FL_JMP bit.

The fexit/fmodret performance stays the same (did not drop),
current code:

  fentry         :   77.904 ± 0.546M/s
  fexit          :   62.430 ± 0.554M/s
  fmodret        :   66.503 ± 0.902M/s

with this change:

  fentry         :   80.472 ± 0.061M/s
  fexit          :   63.995 ± 0.127M/s
  fmodret        :   67.362 ± 0.175M/s

Fixes: 25e4e3565d45 ("ftrace: Introduce FTRACE_OPS_FL_JMP")
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/bpf/20251230145010.103439-2-jolsa@kernel.org
include/linux/ftrace.h
kernel/bpf/trampoline.c
kernel/trace/ftrace.c

index a3a8989e3268da477e5d77fab7aec12ed34eaa2c..cc869c59c1a68ad999cc3391c29b04005101d669 100644 (file)
@@ -359,7 +359,6 @@ enum {
        FTRACE_OPS_FL_DIRECT                    = BIT(17),
        FTRACE_OPS_FL_SUBOP                     = BIT(18),
        FTRACE_OPS_FL_GRAPH                     = BIT(19),
-       FTRACE_OPS_FL_JMP                       = BIT(20),
 };
 
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
index edf9da43762d7e8a2bcdc443e52b8c225633c0e7..468de930b2eda0828b116d1504385b1f2195e1f5 100644 (file)
@@ -221,10 +221,15 @@ static int modify_fentry(struct bpf_trampoline *tr, u32 orig_flags,
        int ret;
 
        if (tr->func.ftrace_managed) {
+               unsigned long addr = (unsigned long) new_addr;
+
+               if (bpf_trampoline_use_jmp(tr->flags))
+                       addr = ftrace_jmp_set(addr);
+
                if (lock_direct_mutex)
-                       ret = modify_ftrace_direct(tr->fops, (long)new_addr);
+                       ret = modify_ftrace_direct(tr->fops, addr);
                else
-                       ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
+                       ret = modify_ftrace_direct_nolock(tr->fops, addr);
        } else {
                ret = bpf_trampoline_update_fentry(tr, orig_flags, old_addr,
                                                   new_addr);
@@ -247,10 +252,15 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
        }
 
        if (tr->func.ftrace_managed) {
+               unsigned long addr = (unsigned long) new_addr;
+
+               if (bpf_trampoline_use_jmp(tr->flags))
+                       addr = ftrace_jmp_set(addr);
+
                ret = ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
                if (ret)
                        return ret;
-               ret = register_ftrace_direct(tr->fops, (long)new_addr);
+               ret = register_ftrace_direct(tr->fops, addr);
        } else {
                ret = bpf_trampoline_update_fentry(tr, 0, NULL, new_addr);
        }
@@ -506,13 +516,6 @@ again:
        if (err)
                goto out_free;
 
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
-       if (bpf_trampoline_use_jmp(tr->flags))
-               tr->fops->flags |= FTRACE_OPS_FL_JMP;
-       else
-               tr->fops->flags &= ~FTRACE_OPS_FL_JMP;
-#endif
-
        WARN_ON(tr->cur_image && total == 0);
        if (tr->cur_image)
                /* progs already running at this address */
@@ -540,15 +543,8 @@ again:
        tr->cur_image = im;
 out:
        /* If any error happens, restore previous flags */
-       if (err) {
+       if (err)
                tr->flags = orig_flags;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
-               if (bpf_trampoline_use_jmp(tr->flags))
-                       tr->fops->flags |= FTRACE_OPS_FL_JMP;
-               else
-                       tr->fops->flags &= ~FTRACE_OPS_FL_JMP;
-#endif
-       }
        kfree(tlinks);
        return err;
 
index ef2d5dca6f70c6a3c34e2490bb5821c1e2e97748..6040aacc97dadbff99bf065b8877809d734caf03 100644 (file)
@@ -6046,15 +6046,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
        if (ftrace_hash_empty(hash))
                return -EINVAL;
 
-       /* This is a "raw" address, and this should never happen. */
-       if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
-               return -EINVAL;
-
        mutex_lock(&direct_mutex);
 
-       if (ops->flags & FTRACE_OPS_FL_JMP)
-               addr = ftrace_jmp_set(addr);
-
        /* Make sure requested entries are not already registered.. */
        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
@@ -6175,13 +6168,6 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 
        lockdep_assert_held_once(&direct_mutex);
 
-       /* This is a "raw" address, and this should never happen. */
-       if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
-               return -EINVAL;
-
-       if (ops->flags & FTRACE_OPS_FL_JMP)
-               addr = ftrace_jmp_set(addr);
-
        /* Enable the tmp_ops to have the same functions as the direct ops */
        ftrace_ops_init(&tmp_ops);
        tmp_ops.func_hash = ops->func_hash;