Use the generic dpm table structure instead of the SMUv13-specific
table. Min/max clocks are now derived from the level array through the
SMU_DPM_TABLE_MIN()/SMU_DPM_TABLE_MAX() helpers, each table records its
clock type, and fine-grained DPM is tracked with a table flag instead
of a dedicated field.
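The generic struct smu_dpm_table, its level array, and the
SMU_DPM_TABLE_MIN()/SMU_DPM_TABLE_MAX() helpers used below come from
the common SMU headers and are not part of this diff. As a rough
sketch, inferred only from how they are used here (the real
definitions may differ), they are assumed to look roughly like:

  /* Assumed layout; actual definitions live in the common SMU headers. */
  struct smu_dpm_clk_level {
          bool enabled;
          uint32_t value;                 /* MHz */
  };

  struct smu_dpm_table {
          enum smu_clk_type clk_type;     /* e.g. SMU_GFXCLK, SMU_UCLK */
          uint32_t count;                 /* number of valid dpm_levels */
          uint32_t flags;                 /* e.g. SMU_DPM_TABLE_FINE_GRAINED */
          /* MAX_DPM_LEVELS is assumed to move to the generic header */
          struct smu_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
  };

  /* Min/max assumed to come from the ascending level array. */
  #define SMU_DPM_TABLE_MIN(t)    ((t)->dpm_levels[0].value)
  #define SMU_DPM_TABLE_MAX(t)    ((t)->dpm_levels[(t)->count - 1].value)

Deriving min/max at the point of use keeps the level array as the
single source of truth and avoids cached fields going stale when a
level is rewritten afterwards (e.g. when GameClockAc caps the top
gfxclk level).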
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
#define SMU13_TOOL_SIZE 0x19000
-#define MAX_DPM_LEVELS 16
#define MAX_PCIE_CONF 3
#define CTF_OFFSET_EDGE 5
uint32_t soc_clock;
};
-struct smu_13_0_dpm_clk_level {
- bool enabled;
- uint32_t value;
-};
-
-struct smu_13_0_dpm_table {
- uint32_t min; /* MHz */
- uint32_t max; /* MHz */
- uint32_t count;
- bool is_fine_grained;
- struct smu_13_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
-};
-
struct smu_13_0_pcie_table {
uint8_t pcie_gen[MAX_PCIE_CONF];
uint8_t pcie_lane[MAX_PCIE_CONF];
};
struct smu_13_0_dpm_tables {
- struct smu_13_0_dpm_table soc_table;
- struct smu_13_0_dpm_table gfx_table;
- struct smu_13_0_dpm_table uclk_table;
- struct smu_13_0_dpm_table eclk_table;
- struct smu_13_0_dpm_table vclk_table;
- struct smu_13_0_dpm_table dclk_table;
- struct smu_13_0_dpm_table dcef_table;
- struct smu_13_0_dpm_table pixel_table;
- struct smu_13_0_dpm_table display_table;
- struct smu_13_0_dpm_table phy_table;
- struct smu_13_0_dpm_table fclk_table;
+ struct smu_dpm_table soc_table;
+ struct smu_dpm_table gfx_table;
+ struct smu_dpm_table uclk_table;
+ struct smu_dpm_table eclk_table;
+ struct smu_dpm_table vclk_table;
+ struct smu_dpm_table dclk_table;
+ struct smu_dpm_table dcef_table;
+ struct smu_dpm_table pixel_table;
+ struct smu_dpm_table display_table;
+ struct smu_dpm_table phy_table;
+ struct smu_dpm_table fclk_table;
struct smu_13_0_pcie_table pcie_table;
};
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_13_0_dpm_table *single_dpm_table);
+ struct smu_dpm_table *single_dpm_table);
int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type, uint16_t level,
uint32_t *min, uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t min_clk, max_clk;
if (amdgpu_sriov_vf(smu->adev)) {
return -EINVAL;
}
- min_clk = dpm_table->min;
- max_clk = dpm_table->max;
+ min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(dpm_table);
if (min) {
if (!min_clk)
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct smu_dpm_table *dpm_table = NULL;
PPTable_t *pptable = smu->smu_table.driver_pptable;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
/* in the case of gfxclk, only fine-grained dpm is honored */
dpm_table->count = 2;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
dpm_table->dpm_levels[1].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[1].value;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* memclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
- pstate_table->uclk_pstate.curr.min = mem_table->min;
- pstate_table->uclk_pstate.curr.max = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
- pstate_table->socclk_pstate.curr.min = soc_table->min;
- pstate_table->socclk_pstate.curr.max = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
static void aldebaran_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+ struct smu_dpm_table *dpm_table)
{
uint32_t i;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct pp_clock_levels_with_latency clocks;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t i;
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int r;
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
switch (level) {
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
return -EINVAL;
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
- (max > dpm_context->dpm_tables.gfx_table.max)) {
+ if (!max || (max < min_clk) || (max > max_clk)) {
dev_warn(adev->dev,
"Invalid max frequency %d MHz specified for determinism\n", max);
return -EINVAL;
}
/* Restore default min/max clocks and enable determinism */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
if (!ret) {
usleep_range(500, 1000);
dev_err(smu->adev->dev, "Input parameter number not correct\n");
return -EINVAL;
}
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (input[0] == 0) {
- if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
- dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
- input[1], dpm_context->dpm_tables.gfx_table.min);
+ if (input[1] < min_clk) {
+ dev_warn(
+ smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1], min_clk);
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
return -EINVAL;
pstate_table->gfxclk_pstate.custom.min = input[1];
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
- dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
- input[1], dpm_context->dpm_tables.gfx_table.max);
+ if (input[1] > max_clk) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
return -EINVAL;
return -EINVAL;
} else {
/* Use the default frequencies for manual and determinism mode */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.gfx_table);
ret = aldebaran_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- sclk_min = sclk_max = gfx_table->max;
- mclk_min = mclk_max = mem_table->max;
- socclk_min = socclk_max = soc_table->max;
- vclk_min = vclk_max = vclk_table->max;
- dclk_min = dclk_max = dclk_table->max;
- fclk_min = fclk_max = fclk_table->max;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- sclk_min = sclk_max = gfx_table->min;
- mclk_min = mclk_max = mem_table->min;
- socclk_min = socclk_max = soc_table->min;
- vclk_min = vclk_max = vclk_table->min;
- dclk_min = dclk_max = dclk_table->min;
- fclk_min = fclk_max = fclk_table->min;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MIN(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MIN(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MIN(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MIN(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MIN(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MIN(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- sclk_min = gfx_table->min;
- sclk_max = gfx_table->max;
- mclk_min = mem_table->min;
- mclk_max = mem_table->max;
- socclk_min = soc_table->min;
- socclk_max = soc_table->max;
- vclk_min = vclk_table->min;
- vclk_max = vclk_table->max;
- dclk_min = dclk_table->min;
- dclk_max = dclk_table->max;
- fclk_min = fclk_table->min;
- fclk_max = fclk_table->max;
+ sclk_min = SMU_DPM_TABLE_MIN(gfx_table);
+ sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = SMU_DPM_TABLE_MIN(mem_table);
+ mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = SMU_DPM_TABLE_MIN(soc_table);
+ socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = SMU_DPM_TABLE_MIN(vclk_table);
+ vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = SMU_DPM_TABLE_MIN(dclk_table);
+ dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = SMU_DPM_TABLE_MIN(fclk_table);
+ fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_13_0_dpm_table *single_dpm_table)
+ struct smu_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
int i;
+ bool is_fine_grained;
ret = smu_v13_0_get_dpm_level_count(smu,
clk_type,
}
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
- ret = smu_v13_0_get_fine_grained_status(smu,
- clk_type,
- &single_dpm_table->is_fine_grained);
+ ret = smu_v13_0_get_fine_grained_status(smu, clk_type,
+ &is_fine_grained);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
return ret;
}
+ if (is_fine_grained)
+ single_dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED;
}
for (i = 0; i < single_dpm_table->count; i++) {
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
-
- if (i == 0)
- single_dpm_table->min = clk;
- else if (i == single_dpm_table->count - 1)
- single_dpm_table->max = clk;
}
return 0;
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_GFXCLK,
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
- dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* vclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
+ dpm_table->clk_type = SMU_VCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_VCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
+ dpm_table->clk_type = SMU_DCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
+ dpm_table->clk_type = SMU_DCEFCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
}
if (min)
- *min = dpm_table->min;
+ *min = SMU_DPM_TABLE_MIN(dpm_table);
if (max)
- *max = dpm_table->max;
+ *max = SMU_DPM_TABLE_MAX(dpm_table);
return 0;
}
OverDriveTableExternal_t *od_table =
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
int i, curr_freq, size = *offset, start_offset = *offset;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int32_t min_value, max_value;
return ret;
}
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/*
* For fine grained dpms, there are only two dpm levels:
* - level 0 -> min clock freq
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
uint32_t min_freq, max_freq;
int ret = 0;
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/* There is only 2 levels for fine grained DPM */
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
if (driver_clocks.GameClockAc &&
- (driver_clocks.GameClockAc < gfx_table->max))
+ (driver_clocks.GameClockAc < SMU_DPM_TABLE_MAX(gfx_table)))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
- pstate_table->vclk_pstate.min = vclk_table->min;
- pstate_table->vclk_pstate.peak = vclk_table->max;
+ pstate_table->vclk_pstate.min = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->vclk_pstate.peak = SMU_DPM_TABLE_MAX(vclk_table);
- pstate_table->dclk_pstate.min = dclk_table->min;
- pstate_table->dclk_pstate.peak = dclk_table->max;
+ pstate_table->dclk_pstate.min = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->dclk_pstate.peak = SMU_DPM_TABLE_MAX(dclk_table);
- pstate_table->fclk_pstate.min = fclk_table->min;
- pstate_table->fclk_pstate.peak = fclk_table->max;
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
if (driver_clocks.BaseClockAc &&
- driver_clocks.BaseClockAc < gfx_table->max)
+ driver_clocks.BaseClockAc < SMU_DPM_TABLE_MAX(gfx_table))
pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
else
- pstate_table->gfxclk_pstate.standard = gfx_table->max;
- pstate_table->uclk_pstate.standard = mem_table->max;
- pstate_table->socclk_pstate.standard = soc_table->min;
- pstate_table->vclk_pstate.standard = vclk_table->min;
- pstate_table->dclk_pstate.standard = dclk_table->min;
- pstate_table->fclk_pstate.standard = fclk_table->min;
+ pstate_table->gfxclk_pstate.standard =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->uclk_pstate.standard = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->socclk_pstate.standard = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->vclk_pstate.standard = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->dclk_pstate.standard = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
return 0;
}
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
uint32_t feature_num;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t *freq_table;
};
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
return -EINVAL;
}
- min_clk = dpm_table->min;
- max_clk = dpm_table->max;
-
+ min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(dpm_table);
if (min)
*min = min_clk;
if (max)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct smu_dpm_table *dpm_table = NULL;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t gfxclkmin, gfxclkmax, levels;
smu_v13_0_6_pm_policy_init(smu);
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
/* In the case of gfxclk, only fine-grained dpm is honored.
* Get min/max values from FW.
&gfxclkmin, &gfxclkmax);
if (ret)
return ret;
-
dpm_table->count = 2;
dpm_table->dpm_levels[0].value = gfxclkmin;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = gfxclkmax;
dpm_table->dpm_levels[1].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[1].value;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
return ret;
}
dpm_table->count = levels;
+ dpm_table->clk_type = dpm_map[j].clk_type;
for (i = 0; i < dpm_table->count; ++i) {
dpm_table->dpm_levels[i].value =
dpm_map[j].freq_table[i];
dpm_table->dpm_levels[i].enabled = true;
-
}
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
-
}
return 0;
static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
- pstate_table->uclk_pstate.curr.min = mem_table->min;
- pstate_table->uclk_pstate.curr.max = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
- pstate_table->socclk_pstate.curr.min = soc_table->min;
- pstate_table->socclk_pstate.curr.max = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
return 0;
}
-static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
- struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+static int
+smu_v13_0_6_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_dpm_table *dpm_table)
{
int i, count;
}
static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
- struct smu_13_0_dpm_table *single_dpm_table,
+ struct smu_dpm_table *single_dpm_table,
uint32_t curr_clk, const char *clk_name)
{
struct pp_clock_levels_with_latency clocks;
int now, size = *offset, start_offset = *offset;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t min_clk, max_clk;
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- min_clk = single_dpm_table->min;
- max_clk = single_dpm_table->max;
+ min_clk = SMU_DPM_TABLE_MIN(single_dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(single_dpm_table);
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *uclk_table =
- &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
switch (level) {
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
- if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
- (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
+ if ((SMU_DPM_TABLE_MIN(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.min) ||
+ (SMU_DPM_TABLE_MAX(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.max)) {
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
- smu, gfx_table->min, gfx_table->max);
+ smu, SMU_DPM_TABLE_MIN(gfx_table),
+ SMU_DPM_TABLE_MAX(gfx_table));
if (ret)
return ret;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.min =
+ SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
- if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
+ if (SMU_DPM_TABLE_MAX(uclk_table) !=
+ pstate_table->uclk_pstate.curr.max) {
/* Min UCLK is not expected to be changed */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, uclk_table->max, false);
+ smu, SMU_UCLK, 0, SMU_DPM_TABLE_MAX(uclk_table),
+ false);
if (ret)
return ret;
- pstate_table->uclk_pstate.curr.max = uclk_table->max;
+ pstate_table->uclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(uclk_table);
}
smu_v13_0_reset_custom_level(smu);
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
- (max > dpm_context->dpm_tables.gfx_table.max)) {
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+ if (!max || (max < min_clk) || (max > max_clk)) {
dev_warn(
adev->dev,
"Invalid max frequency %d MHz specified for determinism\n",
}
/* Restore default min/max clocks and enable determinism */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
- max_clk);
+ max_clk);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(
"Input parameter number not correct\n");
return -EINVAL;
}
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (input[0] == 0) {
- if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
+ if (input[1] < min_clk) {
dev_warn(
smu->adev->dev,
"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.gfx_table.min);
+ input[1], min_clk);
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
return -EINVAL;
pstate_table->gfxclk_pstate.custom.min = input[1];
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
+ if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.gfx_table.max);
+ input[1], max_clk);
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
return -EINVAL;
"UCLK_LIMITS setting not supported!\n");
return -EOPNOTSUPP;
}
-
+ max_clk =
+ SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
if (input[0] == 0) {
dev_info(smu->adev->dev,
"Setting min UCLK level is not supported");
return -EINVAL;
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.uclk_table.max) {
+ if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.uclk_table.max);
+ input[1], max_clk);
pstate_table->uclk_pstate.custom.max =
pstate_table->uclk_pstate.curr.max;
return -EINVAL;
return -EINVAL;
} else {
/* Use the default frequencies for manual and determinism mode */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.gfx_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
- min_clk = dpm_context->dpm_tables.uclk_table.min;
- max_clk = dpm_context->dpm_tables.uclk_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.uclk_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.uclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_GFXCLK,
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
- dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* vclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
+ dpm_table->clk_type = SMU_VCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_VCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
+ dpm_table->clk_type = SMU_DCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
+ dpm_table->clk_type = SMU_DCEFCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
}
if (min)
- *min = dpm_table->min;
+ *min = SMU_DPM_TABLE_MIN(dpm_table);
if (max)
- *max = dpm_table->max;
+ *max = SMU_DPM_TABLE_MAX(dpm_table);
return 0;
}
OverDriveTableExternal_t *od_table =
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
int i, curr_freq, size = *offset, start_offset = *offset;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int32_t min_value, max_value;
return ret;
}
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/*
* For fine grained dpms, there are only two dpm levels:
* - level 0 -> min clock freq
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
uint32_t min_freq, max_freq;
int ret = 0;
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/* There is only 2 levels for fine grained DPM */
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
if (driver_clocks.GameClockAc &&
- (driver_clocks.GameClockAc < gfx_table->max))
+ (driver_clocks.GameClockAc < SMU_DPM_TABLE_MAX(gfx_table)))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
- pstate_table->vclk_pstate.min = vclk_table->min;
- pstate_table->vclk_pstate.peak = vclk_table->max;
+ pstate_table->vclk_pstate.min = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->vclk_pstate.peak = SMU_DPM_TABLE_MAX(vclk_table);
- pstate_table->dclk_pstate.min = dclk_table->min;
- pstate_table->dclk_pstate.peak = dclk_table->max;
+ pstate_table->dclk_pstate.min = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->dclk_pstate.peak = SMU_DPM_TABLE_MAX(dclk_table);
- pstate_table->fclk_pstate.min = fclk_table->min;
- pstate_table->fclk_pstate.peak = fclk_table->max;
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
if (driver_clocks.BaseClockAc &&
- driver_clocks.BaseClockAc < gfx_table->max)
+ driver_clocks.BaseClockAc < SMU_DPM_TABLE_MAX(gfx_table))
pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
else
- pstate_table->gfxclk_pstate.standard = gfx_table->max;
- pstate_table->uclk_pstate.standard = mem_table->max;
- pstate_table->socclk_pstate.standard = soc_table->min;
- pstate_table->vclk_pstate.standard = vclk_table->min;
- pstate_table->dclk_pstate.standard = dclk_table->min;
- pstate_table->fclk_pstate.standard = fclk_table->min;
+ pstate_table->gfxclk_pstate.standard =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->uclk_pstate.standard = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->socclk_pstate.standard = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->vclk_pstate.standard = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->dclk_pstate.standard = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
return 0;
}