return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
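Not shown in this excerpt is how the allocated buffer is handed to the firmware. Going by the long long addr, uint16_t chunk and send_addrs[] locals above, a minimal sketch of that hand-off, pushing the 64-bit GART address to DMUB as four 16-bit chunks, is given below; the sender helper's name and the timeout value are assumptions (the helper whose tail closes the top of this excerpt is presumably the one used):

	/* Sketch only: transmit the GART address in 16-bit chunks, one GPINT
	 * command per chunk. send_vbios_gpint_command() is a placeholder for
	 * the driver's actual GPINT send helper; a real implementation should
	 * also free the GART allocation before bailing out on failure.
	 */
	for (i = 0; i < 4; i++) {
		chunk = (addr >> (i * 16)) & 0xFFFF;
		if (send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000) != DMUB_STATUS_OK)
			return NULL;
	}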
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+ * Data is stored as a byte array that should be cast to the appropriate bb struct
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
/**
* @oem_i2c:
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
+static void dml21_apply_dmub_bb_params(struct dml2_soc_bb *soc_bb, const void *dmub_bb_params, const struct dc *in_dc)
+{
+ switch (in_dc->ctx->dce_version) {
+ case DCN_VERSION_4_01:
+ default:
+ break;
+ }
+}
+
static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
+ soc_bb = &dml2_socbb_dcn401;
qos_params = &dml_dcn4_variant_a_soc_qos_params;
}
/* patch soc bb */
memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
+ if (config->bb_from_dmub)
+ dml21_apply_dmub_bb_params(&dml_init->soc_bb, config->bb_from_dmub, in_dc);
+
/* patch qos params */
memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
}
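dml21_apply_dmub_bb_params() is introduced above as a stub: no DCN version overrides any field yet, so copying dml2_socbb_dcn401 remains the effective behavior. Purely as an illustration of how the override step could look for a version whose DMUB buffer carries the flattened struct dmub_soc_bb_params added later in this patch (the field mapping and the ns-to-us conversion are assumptions based on matching names, not this patch's implementation):

	/* Hypothetical override body: fold the flattened, nanosecond-based
	 * VBIOS values into the already-copied defaults. Only a subset of the
	 * fields is shown; the remaining *_ns fields would convert the same way.
	 */
	const struct dmub_soc_bb_params *bb_params = dmub_bb_params;

	soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
			bb_params->z8_stutter_exit_latency_ns / 1000.0;
	soc_bb->power_management_parameters.z8_min_idle_time =
			bb_params->z8_min_idle_time_ns / 1000.0;
	soc_bb->power_management_parameters.type_b_dram_clk_change_blackout_us =
			bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
	soc_bb->power_management_parameters.type_b_ppt_blackout_us =
			bb_params->type_b_ppt_blackout_ns / 1000.0;

	soc_bb->vmin_limit.dispclk_khz = bb_params->vmin_limit_dispclk_khz;
	soc_bb->vmin_limit.dcfclk_khz = bb_params->vmin_limit_dcfclk_khz;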
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+
+/* Flattened structure containing SOC BB parameters stored in the VBIOS.
+ * Storing the entire bounding box struct in VBIOS is not practical because the struct can gain new parameters over time.
+ * A flattened layout also avoids alignment issues when new parameters are added to the SoC BB.
+ */
+struct dmub_soc_bb_params {
+ /* dml2_soc_power_management_parameters */
+ uint32_t dram_clk_change_blackout_ns;
+ uint32_t dram_clk_change_read_only_ns;
+ uint32_t dram_clk_change_write_only_ns;
+ uint32_t fclk_change_blackout_ns;
+ uint32_t g7_ppt_blackout_ns;
+ uint32_t stutter_enter_plus_exit_latency_ns;
+ uint32_t stutter_exit_latency_ns;
+ uint32_t z8_stutter_enter_plus_exit_latency_ns;
+ uint32_t z8_stutter_exit_latency_ns;
+ uint32_t z8_min_idle_time_ns;
+ uint32_t type_b_dram_clk_change_blackout_ns;
+ uint32_t type_b_ppt_blackout_ns;
+ /* dml2_soc_vmin_clock_limits */
+ uint32_t vmin_limit_dispclk_khz;
+ uint32_t vmin_limit_dcfclk_khz;
+};
#pragma pack(pop)
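Since every member is a uint32_t and the structure lives inside the packed region above, its layout is fixed at 14 * 4 = 56 bytes. A compile-time guard along the following lines (not part of this patch; whether static_assert is usable at a given include site is an assumption) can catch accidental ABI drift when fields are appended:

	/* Illustrative only: fail the build if the shared layout drifts.
	 * Update the expected size together with the field list above.
	 */
	static_assert(sizeof(struct dmub_soc_bb_params) == 14 * sizeof(uint32_t),
		      "dmub_soc_bb_params layout changed; update its consumers");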
/**