git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
perf: Remove get_perf_callchain() init_nr argument
author: Josh Poimboeuf <jpoimboe@kernel.org>
        Wed, 20 Aug 2025 18:03:39 +0000 (14:03 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 18 Dec 2025 12:54:49 +0000 (13:54 +0100)
[ Upstream commit e649bcda25b5ae1a30a182cc450f928a0b282c93 ]

The 'init_nr' argument has double duty: it's used to initialize both the
number of contexts and the number of stack entries.  That's confusing
and the callers always pass zero anyway.  Hard code the zero.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Namhyung Kim <Namhyung@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20250820180428.259565081@kernel.org
Stable-dep-of: 23f852daa4ba ("bpf: Fix stackmap overflow check in __bpf_get_stackid()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/perf_event.h
kernel/bpf/stackmap.c
kernel/events/callchain.c
kernel/events/core.c

index ce64b4b937f0686c7b8ec19451779bbfba0fb99f..c2bd4bc45a27b286cc0b92f886e17165aec16ede 100644 (file)
@@ -1602,7 +1602,7 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
index 3615c06b7dfa987a5055946c0d6931def5103e3b..ec3a57a5fba1f89853fc550b7d7f66de9fd89f9b 100644 (file)
@@ -314,7 +314,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
        if (max_depth > sysctl_perf_event_max_stack)
                max_depth = sysctl_perf_event_max_stack;
 
-       trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+       trace = get_perf_callchain(regs, kernel, user, max_depth,
                                   false, false);
 
        if (unlikely(!trace))
@@ -451,7 +451,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
        else if (kernel && task)
                trace = get_callchain_entry_for_task(task, max_depth);
        else
-               trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+               trace = get_perf_callchain(regs, kernel, user, max_depth,
                                           crosstask, false);
 
        if (unlikely(!trace) || trace->nr < skip) {
index 49d87e6db553f0f5c31bf480caeb82fd7201efeb..677901f456a94e74d24913cac57910684fb00309 100644 (file)
@@ -216,7 +216,7 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 }
 
 struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark)
 {
        struct perf_callchain_entry *entry;
@@ -231,11 +231,11 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
        if (!entry)
                return NULL;
 
-       ctx.entry     = entry;
-       ctx.max_stack = max_stack;
-       ctx.nr        = entry->nr = init_nr;
-       ctx.contexts       = 0;
-       ctx.contexts_maxed = false;
+       ctx.entry               = entry;
+       ctx.max_stack           = max_stack;
+       ctx.nr                  = entry->nr = 0;
+       ctx.contexts            = 0;
+       ctx.contexts_maxed      = false;
 
        if (kernel && !user_mode(regs)) {
                if (add_mark)
index d6a86d8e9e59b74574dd1ccc47eefe8bfdff9deb..6bc8b84f121563cbc1aa8c8e66953ec7b09d9290 100644 (file)
@@ -7860,7 +7860,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
        if (!kernel && !user)
                return &__empty_callchain;
 
-       callchain = get_perf_callchain(regs, 0, kernel, user,
+       callchain = get_perf_callchain(regs, kernel, user,
                                       max_stack, crosstask, true);
        return callchain ?: &__empty_callchain;
 }