git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
cpufreq: Use trace_call__##name() at guarded tracepoint call sites
author Vineeth Pillai (Google) <vineeth@bitbyteword.org>
Mon, 23 Mar 2026 16:00:25 +0000 (12:00 -0400)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Sat, 28 Mar 2026 17:37:06 +0000 (13:37 -0400)
Replace trace_foo() with the new trace_call__foo() at sites already
guarded by trace_foo_enabled(), avoiding a redundant
static_branch_unlikely() re-evaluation inside the tracepoint.
trace_call__foo() calls the tracepoint callbacks directly without
utilizing the static branch again.

Cc: Huang Rui <ray.huang@amd.com>
Cc: Mario Limonciello <mario.limonciello@amd.com>
Cc: Perry Yuan <perry.yuan@amd.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://patch.msgid.link/20260323160052.17528-7-vineeth@bitbyteword.org
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Vineeth Pillai (Google) <vineeth@bitbyteword.org>
Assisted-by: Claude:claude-sonnet-4-6
Acked-by: Rafael J. Wysocki (Intel) <rafael@kernel.org> # cpufreq core
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c

index 5aa9fcd80cf519bbe16568a212423d64d29d2fb6..4c47324aa2f736ab6143a0a6d1d9ec920c78316d 100644 (file)
@@ -247,7 +247,7 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
        if (trace_amd_pstate_epp_perf_enabled()) {
                union perf_cached perf = READ_ONCE(cpudata->perf);
 
-               trace_amd_pstate_epp_perf(cpudata->cpu,
+               trace_call__amd_pstate_epp_perf(cpudata->cpu,
                                          perf.highest_perf,
                                          epp,
                                          min_perf,
@@ -298,7 +298,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
        if (trace_amd_pstate_epp_perf_enabled()) {
                union perf_cached perf = cpudata->perf;
 
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+               trace_call__amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
                                          epp,
                                          FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
                                                    cpudata->cppc_req_cached),
@@ -343,7 +343,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
        if (trace_amd_pstate_epp_perf_enabled()) {
                union perf_cached perf = cpudata->perf;
 
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+               trace_call__amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
                                          epp,
                                          FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
                                                    cpudata->cppc_req_cached),
@@ -507,7 +507,7 @@ static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
        if (trace_amd_pstate_epp_perf_enabled()) {
                union perf_cached perf = READ_ONCE(cpudata->perf);
 
-               trace_amd_pstate_epp_perf(cpudata->cpu,
+               trace_call__amd_pstate_epp_perf(cpudata->cpu,
                                          perf.highest_perf,
                                          epp,
                                          min_perf,
@@ -588,7 +588,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
        }
 
        if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
-               trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
+               trace_call__amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
                        cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
                                cpudata->cpu, fast_switch);
        }
index 277884d91913c84616104ce49c1960a4d3496680..58901047eae5a0e76a2791f0e92b1611e6c48493 100644 (file)
@@ -2222,7 +2222,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 
        if (trace_cpu_frequency_enabled()) {
                for_each_cpu(cpu, policy->cpus)
-                       trace_cpu_frequency(freq, cpu);
+                       trace_call__cpu_frequency(freq, cpu);
        }
 
        return freq;
index 11c58af41900645154938618caf9bc66d8bb5a23..70be9522091449c115e57e08737cc8439fee39d4 100644 (file)
@@ -3132,7 +3132,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
                return;
 
        sample = &cpu->sample;
-       trace_pstate_sample(trace_type,
+       trace_call__pstate_sample(trace_type,
                0,
                old_pstate,
                cpu->pstate.current_pstate,