drm/amd/display: Match expected data types
author		Zheng Austin <Austin.Zheng@amd.com>
		Mon, 19 Jan 2026 22:46:39 +0000 (17:46 -0500)
committer	Alex Deucher <alexander.deucher@amd.com>
		Tue, 3 Feb 2026 21:43:08 +0000 (16:43 -0500)
[Why/How]
Data types should match what is expected; update or cast the data
accordingly.

Also change ASSERT to DML_ASSERT.

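For illustration, a minimal sketch of the two conversions this patch makes explicit (the names below are hypothetical, not the actual DML declarations; it assumes the bandwidth table's num_entries is unsigned and the conversion helper takes an unsigned long long, which is what the unsigned loop index and the explicit casts suggest):

/*
 * Illustrative sketch only -- hypothetical types and names, not the DML code.
 */
struct bw_table_sketch {
	unsigned int num_entries;
	unsigned long long entries[8];
};

static double to_uclk_khz_sketch(unsigned long long bandwidth_kbps,
				 const struct bw_table_sketch *table)
{
	unsigned int i;	/* unsigned index: no signed/unsigned comparison below */

	for (i = 0; i < table->num_entries; i++)
		if (table->entries[i] >= bandwidth_kbps)
			break;

	return (double)bandwidth_kbps;
}

static void caller_sketch(const struct bw_table_sketch *table)
{
	double derated_kbps = 1000000.0 / (80.0 / 100.0);

	/* The derated bandwidth is a double expression; the cast makes the
	 * narrowing to the parameter type explicit rather than implicit. */
	to_uclk_khz_sketch((unsigned long long)derated_kbps, table);
}

Without the cast the double would still be converted implicitly at the call; spelling it out documents the intended truncation and keeps the argument type in line with the function signature.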
Reviewed-by: Dillon Varone <dillon.varone@amd.com>
Signed-off-by: Zheng Austin <Austin.Zheng@amd.com>
Signed-off-by: Wayne Lin <wayne.lin@amd.com>
Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c

index 37699cc9e5c141cb3998c1af23bd65d54eaeca68..ca5ac3c0deb56c16b4f54b2595687880b2c6a169 100644 (file)
@@ -7080,7 +7080,7 @@ static void calculate_excess_vactive_bandwidth_required(
 static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config, const struct dml2_mcg_dram_bw_to_min_clk_table *dram_bw_table)
 {
        double bw_mbps = 0;
-       int i;
+       unsigned int i;
 
        if (!dram_config->alt_clock_bw_conversion)
                bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0;
@@ -7091,7 +7091,7 @@ static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2
                                break;
                        }
 
-       ASSERT(bw_mbps > 0);
+       DML_ASSERT(bw_mbps > 0);
 
        return bw_mbps;
 }
index 5c713f2e6eca0df9ce997816da5f79c2daf960de..9d7741fd0adb1d6c27f1ec6db5ff438c3bff733f 100644 (file)
@@ -17,7 +17,7 @@ static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const
                uclk_bytes_per_tick = dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock;
                uclk_khz = (double)bandwidth_kbps / uclk_bytes_per_tick;
        } else {
-               int i;
+               unsigned int i;
                /* For lpddr5 bytes per tick changes with mpstate, use table to find uclk*/
                for (i = 0; i < dram_bw_table->num_entries; i++)
                        if (dram_bw_table->entries[i].pre_derate_dram_bw_kbps >= bandwidth_kbps) {
@@ -63,17 +63,17 @@ static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dp
        double min_uclk_latency, min_fclk_latency, min_dcfclk_latency;
        const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
 
-       min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps
-                                                                                       / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100),
+       min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps
+                                                                                       / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)),
                                                        &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
        if (in_out->display_cfg->display_config.hostvm_enable)
-               min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.urgent_bw_dram_kbps
-                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100),
+               min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps
+                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100)),
                                                         &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
        else
-               min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.urgent_bw_dram_kbps
-                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100),
+               min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps
+                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)),
                                                         &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
        min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
@@ -111,12 +111,12 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
        const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
 
        /* assumes DF throttling is enabled */
-       min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps
-                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100),
+       min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps
+                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100)),
                                                &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
-       min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps
-                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100),
+       min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps
+                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100)),
                                                 &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
        min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
@@ -144,12 +144,12 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
        in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
 
        /* assumes DF throttling is disabled */
-       min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps
-                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100),
+       min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps
+                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)),
                                                                &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
-       min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps
-                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100),
+       min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps
+                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)),
                                                                &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
        min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
@@ -185,8 +185,8 @@ static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_
        double min_uclk_latency, min_fclk_latency, min_dcfclk_latency;
        const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
 
-       min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps
-                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100),
+       min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps
+                                                                               / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100)),
                                                                &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table);
 
        min_fclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;