cpufreq/amd-pstate: Add trace event for EPP perf updates
author     Mario Limonciello <mario.limonciello@amd.com>  Mon, 9 Dec 2024 18:52:36 +0000 (12:52 -0600)
committer  Mario Limonciello <mario.limonciello@amd.com>  Wed, 11 Dec 2024 16:44:52 +0000 (10:44 -0600)
In "active" mode the most important thing for debugging whether
an issue is hardware or software based is to look at what was the
last thing written to the CPPC request MSR or shared memory region.

The 'amd_pstate_epp_perf' trace event records the values written
for each CPU.
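
For reference, the TP_printk format in this patch renders one line per
update; a hypothetical sample (illustrative values, not from a real
trace) would look like:

  cpu2: [26<->166]/166, epp=128, boost=1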

Reviewed-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://lore.kernel.org/r/20241209185248.16301-4-mario.limonciello@amd.com
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
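
As a usage sketch (not part of this commit): the event can be consumed
through tracefs like any other tracepoint. The paths below assume
tracefs is mounted at /sys/kernel/tracing and that the event lives
under the amd_cpufreq trace system defined in amd-pstate-trace.h:

  # echo 1 > /sys/kernel/tracing/events/amd_cpufreq/amd_pstate_epp_perf/enable
  # cat /sys/kernel/tracing/trace_pipe
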
drivers/cpufreq/amd-pstate-trace.h
drivers/cpufreq/amd-pstate.c

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb13d88a5cfcaeebf4c57a06aca38e2..e2221a4b6901c5b04df1f9c073f0cea6e57a5199 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -88,6 +88,51 @@ TRACE_EVENT(amd_pstate_perf,
                 )
 );
 
+TRACE_EVENT(amd_pstate_epp_perf,
+
+       TP_PROTO(unsigned int cpu_id,
+                unsigned int highest_perf,
+                unsigned int epp,
+                unsigned int min_perf,
+                unsigned int max_perf,
+                bool boost
+                ),
+
+       TP_ARGS(cpu_id,
+               highest_perf,
+               epp,
+               min_perf,
+               max_perf,
+               boost),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, cpu_id)
+               __field(unsigned int, highest_perf)
+               __field(unsigned int, epp)
+               __field(unsigned int, min_perf)
+               __field(unsigned int, max_perf)
+               __field(bool, boost)
+               ),
+
+       TP_fast_assign(
+               __entry->cpu_id = cpu_id;
+               __entry->highest_perf = highest_perf;
+               __entry->epp = epp;
+               __entry->min_perf = min_perf;
+               __entry->max_perf = max_perf;
+               __entry->boost = boost;
+               ),
+
+       TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+                 (unsigned int)__entry->cpu_id,
+                 (unsigned int)__entry->min_perf,
+                 (unsigned int)__entry->max_perf,
+                 (unsigned int)__entry->highest_perf,
+                 (unsigned int)__entry->epp,
+                 (bool)__entry->boost
+                )
+);
+
 #endif /* _AMD_PSTATE_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 8ce754ead328cce575cbac29f08e8586042ec1bf..20d52bce18821aa7446857a63585a276011a5167 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -324,6 +324,14 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
                return -EBUSY;
        }
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+                                         epp,
+                                         AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+                                         AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
+                                         cpudata->boost_state);
+       }
+
        ret = amd_pstate_set_epp(cpudata, epp);
 
        return ret;
@@ -1598,6 +1606,13 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 
        WRITE_ONCE(cpudata->cppc_req_cached, value);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+                                         cpudata->min_limit_perf,
+                                         cpudata->max_limit_perf,
+                                         policy->boost_enabled);
+       }
+
        amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
                               cpudata->max_limit_perf, false);
 
@@ -1641,6 +1656,13 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 
        max_perf = READ_ONCE(cpudata->highest_perf);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+                                         cpudata->epp_cached,
+                                         AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+                                         max_perf, cpudata->boost_state);
+       }
+
        amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
        amd_pstate_set_epp(cpudata, cpudata->epp_cached);
 }
@@ -1669,6 +1691,12 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 
        mutex_lock(&amd_pstate_limits_lock);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+                                         AMD_CPPC_EPP_BALANCE_POWERSAVE,
+                                         min_perf, min_perf, policy->boost_enabled);
+       }
+
        amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
        amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);