git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
cpufreq/amd-pstate: Convert the amd_pstate_get/set_epp() to static calls
author: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Wed, 4 Dec 2024 14:48:38 +0000 (14:48 +0000)
committer: Mario Limonciello <mario.limonciello@amd.com>
Wed, 11 Dec 2024 16:44:52 +0000 (10:44 -0600)
MSR and shared memory based systems have different mechanisms to get and
set the epp value. Split those mechanisms into different functions and
assign them appropriately to the static calls at boot time. This eliminates
the need for the "if(cpu_feature_enabled(X86_FEATURE_CPPC))" checks at
runtime.

Also, propagate the error code from rdmsrl_on_cpu() and cppc_get_epp_perf()
to *_get_epp()'s caller, instead of returning -EIO unconditionally.

Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://lore.kernel.org/r/20241204144842.164178-2-Dhananjay.Ugwekar@amd.com
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
drivers/cpufreq/amd-pstate.c

index 66e5dfc711c0c5335340196be687a469853aefa4..bc42d96984f4c4a8cf36820e6f1a7e57bb255f0e 100644 (file)
@@ -180,26 +180,40 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
 static DEFINE_MUTEX(amd_pstate_limits_lock);
 static DEFINE_MUTEX(amd_pstate_driver_lock);
 
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
 {
        u64 epp;
        int ret;
 
-       if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-               if (!cppc_req_cached) {
-                       epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
-                                       &cppc_req_cached);
-                       if (epp)
-                               return epp;
-               }
-               epp = (cppc_req_cached >> 24) & 0xFF;
-       } else {
-               ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+       if (!cppc_req_cached) {
+               ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached);
                if (ret < 0) {
                        pr_debug("Could not retrieve energy perf value (%d)\n", ret);
-                       return -EIO;
+                       return ret;
                }
        }
+       epp = (cppc_req_cached >> 24) & 0xFF;
+
+       return (s16)epp;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+       return static_call(amd_pstate_get_epp)(cpudata, cppc_req_cached);
+}
+
+static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
+{
+       u64 epp;
+       int ret;
+
+       ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+       if (ret < 0) {
+               pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+               return ret;
+       }
 
        return (s16)(epp & 0xff);
 }
@@ -253,33 +267,45 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
                                            max_perf, fast_switch);
 }
 
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
 {
        int ret;
-       struct cppc_perf_ctrls perf_ctrls;
-
-       if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-               u64 value = READ_ONCE(cpudata->cppc_req_cached);
 
-               value &= ~GENMASK_ULL(31, 24);
-               value |= (u64)epp << 24;
-               WRITE_ONCE(cpudata->cppc_req_cached, value);
+       u64 value = READ_ONCE(cpudata->cppc_req_cached);
 
-               ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
-               if (!ret)
-                       cpudata->epp_cached = epp;
-       } else {
-               amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
-                                            cpudata->max_limit_perf, false);
+       value &= ~GENMASK_ULL(31, 24);
+       value |= (u64)epp << 24;
+       WRITE_ONCE(cpudata->cppc_req_cached, value);
 
-               perf_ctrls.energy_perf = epp;
-               ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
-               if (ret) {
-                       pr_debug("failed to set energy perf value (%d)\n", ret);
-                       return ret;
-               }
+       ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+       if (!ret)
                cpudata->epp_cached = epp;
+
+       return ret;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
+
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+       return static_call(amd_pstate_set_epp)(cpudata, epp);
+}
+
+static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+       int ret;
+       struct cppc_perf_ctrls perf_ctrls;
+
+       amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+                                    cpudata->max_limit_perf, false);
+
+       perf_ctrls.energy_perf = epp;
+       ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+       if (ret) {
+               pr_debug("failed to set energy perf value (%d)\n", ret);
+               return ret;
        }
+       cpudata->epp_cached = epp;
 
        return ret;
 }
@@ -1869,6 +1895,8 @@ static int __init amd_pstate_init(void)
                static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
                static_call_update(amd_pstate_init_perf, shmem_init_perf);
                static_call_update(amd_pstate_update_perf, shmem_update_perf);
+               static_call_update(amd_pstate_get_epp, shmem_get_epp);
+               static_call_update(amd_pstate_set_epp, shmem_set_epp);
        }
 
        if (amd_pstate_prefcore) {