cpufreq/amd-pstate: Move all EPP tracing into *_update_perf and *_set_epp functions
author    Mario Limonciello <mario.limonciello@amd.com>
          Mon, 9 Dec 2024 17:57:38 +0000 (11:57 -0600)
committer Mario Limonciello <mario.limonciello@amd.com>
          Thu, 6 Mar 2025 19:01:25 +0000 (13:01 -0600)
EPP tracing is currently done by the caller, but this placement precludes
recording whether the CPPC request has actually changed.

Move it into the update_perf and set_epp functions and include information
about whether the request has changed from the last one.
amd_pstate_update_perf() and amd_pstate_set_epp() now require the policy
as an argument instead of the cpudata.

Reviewed-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
drivers/cpufreq/amd-pstate-trace.h
drivers/cpufreq/amd-pstate.c
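
For illustration, a minimal, self-contained C sketch of the pattern this
commit establishes: each backend now takes the policy, derives cpudata from
policy->driver_data, and emits the trace event (with a "changed" flag
computed as value != prev) before the early return on an unchanged request.
The struct layouts, the bit packing, and the printf() stand-in for the
amd_pstate_epp_perf tracepoint are simplifications for this sketch, not the
kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpudata {
	int cpu;
	uint64_t cppc_req_cached;	/* last CPPC request that was written */
	uint8_t epp_cached;
};

struct policy {				/* stand-in for struct cpufreq_policy */
	void *driver_data;
	bool boost_enabled;
};

/* Stand-in for the amd_pstate_epp_perf tracepoint. */
static void trace_epp_perf(int cpu, unsigned epp, unsigned min_perf,
			   unsigned max_perf, unsigned boost, unsigned changed)
{
	printf("cpu%d: [%u<->%u], epp=%u, boost=%u, changed=%u\n",
	       cpu, min_perf, max_perf, epp, boost, changed);
}

/* Mirrors the new ordering: build the request, trace it together with
 * the "changed" flag, then skip the hardware write if nothing changed. */
static int update_perf(struct policy *policy, uint8_t min_perf,
		       uint8_t max_perf, uint8_t epp)
{
	struct cpudata *cpudata = policy->driver_data;
	uint64_t value, prev;

	prev = cpudata->cppc_req_cached;
	/* Simplified packing; the driver uses FIELD_PREP() with masks. */
	value = ((uint64_t)max_perf << 16) | ((uint64_t)min_perf << 8) | epp;

	trace_epp_perf(cpudata->cpu, epp, min_perf, max_perf,
		       policy->boost_enabled, value != prev);

	if (value == prev)
		return 0;		/* nothing to write to the hardware */

	/* ... MSR / shared-memory write would go here ... */
	cpudata->cppc_req_cached = value;
	if (epp != cpudata->epp_cached)
		cpudata->epp_cached = epp;

	return 0;
}

int main(void)
{
	struct cpudata cd = { .cpu = 0 };
	struct policy pol = { .driver_data = &cd, .boost_enabled = true };

	update_perf(&pol, 10, 100, 2);	/* traces changed=1, writes */
	update_perf(&pol, 10, 100, 2);	/* traces changed=0, early return */
	return 0;
}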

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index f457d4af2c62e5e7828946773fae7fcd0d15e657..32e1bdc588c52d33f57faaa2ce39d37c22621f89 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -90,7 +90,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
                 u8 epp,
                 u8 min_perf,
                 u8 max_perf,
-                bool boost
+                bool boost,
+                bool changed
                 ),
 
        TP_ARGS(cpu_id,
@@ -98,7 +99,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
                epp,
                min_perf,
                max_perf,
-               boost),
+               boost,
+               changed),
 
        TP_STRUCT__entry(
                __field(unsigned int, cpu_id)
@@ -107,6 +109,7 @@ TRACE_EVENT(amd_pstate_epp_perf,
                __field(u8, min_perf)
                __field(u8, max_perf)
                __field(bool, boost)
+               __field(bool, changed)
                ),
 
        TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
                __entry->min_perf = min_perf;
                __entry->max_perf = max_perf;
                __entry->boost = boost;
+               __entry->changed = changed;
                ),
 
-       TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
+       TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
                  (unsigned int)__entry->cpu_id,
                  (u8)__entry->min_perf,
                  (u8)__entry->max_perf,
                  (u8)__entry->highest_perf,
                  (u8)__entry->epp,
-                 (bool)__entry->boost
+                 (bool)__entry->boost,
+                 (bool)__entry->changed
                 )
 );
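
With the extended format string, an enabled amd_pstate_epp_perf event now
also reports whether the request differs from the cached one; a trace line
would look like this (values illustrative):

  cpu1: [6<->166]/166, epp=2, boost=1, changed=1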
 
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 06bf0d888be6b0da3949282753e31292dc66ccc8..e5db731618e820b260739778786d10f3d38c5b6c 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
        return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
 }
 
-static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
                           u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+       struct amd_cpudata *cpudata = policy->driver_data;
        u64 value, prev;
 
        value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
        value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
        value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               union perf_cached perf = READ_ONCE(cpudata->perf);
+
+               trace_amd_pstate_epp_perf(cpudata->cpu,
+                                         perf.highest_perf,
+                                         epp,
+                                         min_perf,
+                                         max_perf,
+                                         policy->boost_enabled,
+                                         value != prev);
+       }
+
        if (value == prev)
                return 0;
 
@@ -256,24 +269,26 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
        }
 
        WRITE_ONCE(cpudata->cppc_req_cached, value);
-       WRITE_ONCE(cpudata->epp_cached, epp);
+       if (epp != cpudata->epp_cached)
+               WRITE_ONCE(cpudata->epp_cached, epp);
 
        return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
                                          u8 min_perf, u8 des_perf,
                                          u8 max_perf, u8 epp,
                                          bool fast_switch)
 {
-       return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+       return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
                                                   max_perf, epp, fast_switch);
 }
 
-static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
+       struct amd_cpudata *cpudata = policy->driver_data;
        u64 value, prev;
        int ret;
 
@@ -281,6 +296,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
        value &= ~AMD_CPPC_EPP_PERF_MASK;
        value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               union perf_cached perf = cpudata->perf;
+
+               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+                                         epp,
+                                         FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+                                                   cpudata->cppc_req_cached),
+                                         FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+                                                   cpudata->cppc_req_cached),
+                                         policy->boost_enabled,
+                                         value != prev);
+       }
+
        if (value == prev)
                return 0;
 
@@ -299,15 +327,29 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 
 DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
 
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-       return static_call(amd_pstate_set_epp)(cpudata, epp);
+       return static_call(amd_pstate_set_epp)(policy, epp);
 }
 
-static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-       int ret;
+       struct amd_cpudata *cpudata = policy->driver_data;
        struct cppc_perf_ctrls perf_ctrls;
+       int ret;
+
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               union perf_cached perf = cpudata->perf;
+
+               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+                                         epp,
+                                         FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+                                                   cpudata->cppc_req_cached),
+                                         FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+                                                   cpudata->cppc_req_cached),
+                                         policy->boost_enabled,
+                                         epp != cpudata->epp_cached);
+       }
 
        if (epp == cpudata->epp_cached)
                return 0;
@@ -339,17 +381,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
                return -EBUSY;
        }
 
-       if (trace_amd_pstate_epp_perf_enabled()) {
-               union perf_cached perf = READ_ONCE(cpudata->perf);
-
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-                                         epp,
-                                         FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-                                         FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
-                                         policy->boost_enabled);
-       }
-
-       return amd_pstate_set_epp(cpudata, epp);
+       return amd_pstate_set_epp(policy, epp);
 }
 
 static inline int msr_cppc_enable(bool enable)
@@ -492,15 +524,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
        return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
                             u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+       struct amd_cpudata *cpudata = policy->driver_data;
        struct cppc_perf_ctrls perf_ctrls;
        u64 value, prev;
        int ret;
 
        if (cppc_state == AMD_PSTATE_ACTIVE) {
-               int ret = shmem_set_epp(cpudata, epp);
+               int ret = shmem_set_epp(policy, epp);
 
                if (ret)
                        return ret;
@@ -515,6 +548,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
        value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
        value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+       if (trace_amd_pstate_epp_perf_enabled()) {
+               union perf_cached perf = READ_ONCE(cpudata->perf);
+
+               trace_amd_pstate_epp_perf(cpudata->cpu,
+                                         perf.highest_perf,
+                                         epp,
+                                         min_perf,
+                                         max_perf,
+                                         policy->boost_enabled,
+                                         value != prev);
+       }
+
        if (value == prev)
                return 0;
 
@@ -592,7 +637,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
                                cpudata->cpu, fast_switch);
        }
 
-       amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
+       amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
 }
 
 static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -1525,7 +1570,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
                        return ret;
                WRITE_ONCE(cpudata->cppc_req_cached, value);
        }
-       ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+       ret = amd_pstate_set_epp(policy, cpudata->epp_default);
        if (ret)
                return ret;
 
@@ -1566,14 +1611,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
                epp = READ_ONCE(cpudata->epp_cached);
 
        perf = READ_ONCE(cpudata->perf);
-       if (trace_amd_pstate_epp_perf_enabled()) {
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
-                                         perf.min_limit_perf,
-                                         perf.max_limit_perf,
-                                         policy->boost_enabled);
-       }
 
-       return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
+       return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
                                      perf.max_limit_perf, epp, false);
 }
 
@@ -1605,20 +1644,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
 static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
 {
-       struct amd_cpudata *cpudata = policy->driver_data;
-       union perf_cached perf = READ_ONCE(cpudata->perf);
        int ret;
 
        ret = amd_pstate_cppc_enable(true);
        if (ret)
                pr_err("failed to enable amd pstate during resume, return %d\n", ret);
 
-       if (trace_amd_pstate_epp_perf_enabled()) {
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-                                         cpudata->epp_cached,
-                                         FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-                                         perf.highest_perf, policy->boost_enabled);
-       }
 
        return amd_pstate_epp_update_limit(policy);
 }
@@ -1646,14 +1677,7 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
        if (cpudata->suspended)
                return 0;
 
-       if (trace_amd_pstate_epp_perf_enabled()) {
-               trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-                                         AMD_CPPC_EPP_BALANCE_POWERSAVE,
-                                         perf.lowest_perf, perf.lowest_perf,
-                                         policy->boost_enabled);
-       }
-
-       return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
+       return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
                                      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }