AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
AMDGPU_METRICS_ATTR_ID_MAX,
};
#undef pr_info
#undef pr_debug
+/*
+ * A HBM stack occupies two consecutive UMC-channel bits in the active
+ * mask; the stack is considered present only when both bits are set.
+ */
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0x3) == 0x3)
+
+/*
+ * Iterate stack_idx over HBM stacks while any channel bits remain in
+ * umc_mask.  Destructively right-shifts two bits off umc_mask per
+ * iteration, so callers must pass a scratch copy of the mask.
+ * NOTE(review): the trailing '\' on the final macro line continues the
+ * definition into the following blank line; harmless today, but worth
+ * dropping so a future adjacent line is not silently absorbed.
+ */
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+ for ((stack_idx) = 0; (umc_mask); \
+ (umc_mask) >>= 2, (stack_idx)++) \
+
/*
 * Designated-initializer entry mapping a generic SMU feature id to its
 * SMU v13.0.12 firmware feature bit; the leading 1 flags the slot as a
 * valid mapping.
 */
#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
[smu_feature] = { 1, (smu_13_0_12_feature) }
struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
+ int ret = 0, xcc_id, inst, i, j, idx;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
gpu_metrics->temperature_vrsoc =
SMUQ10_ROUND(metrics->MaxVrTemperature);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM))) {
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+ if (unlikely(hweight64(mask) / 2 > SMU_13_0_6_MAX_HBM_STACKS)) {
+ dev_warn(adev->dev, "Invalid umc mask %lld\n", mask);
+ } else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ metrics->HbmTemperature[stack_idx];
+ }
+ }
+ }
+ idx = 0;
+ for_each_inst(i, adev->aid_mask) {
+ gpu_metrics->temperature_aid[idx] = metrics->AidTemperature[i];
+ idx++;
+ }
+ }
+
gpu_metrics->average_gfx_activity =
SMUQ10_ROUND(metrics->SocketGfxBusy);
gpu_metrics->average_umc_activity =
[i] = SMUQ10_ROUND(
metrics->GfxclkBelowHostLimitTotalAcc[inst]);
}
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM)))
+ gpu_metrics->temperature_xcd[i] = metrics->XcdTemperature[inst];
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
SMU_CAP(RAS_EEPROM),
SMU_CAP(FAST_PPT),
SMU_CAP(SYSTEM_POWER_METRICS),
+ SMU_CAP(TEMP_AID_XCD_HBM),
SMU_CAP(ALL),
};
/* Upper bounds used to size the fixed per-instance gpu_metrics arrays. */
#define SMU_13_0_6_MAX_XCC 8
#define SMU_13_0_6_MAX_VCN 4
#define SMU_13_0_6_MAX_JPEG 40
+/* Bounds for the new AID and HBM-stack temperature arrays. */
+#define SMU_13_0_6_MAX_AID 4
+#define SMU_13_0_6_MAX_HBM_STACKS 8
/* Install the SMU v13.0.6 powerplay function table on @smu. */
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
/* Returns true when the firmware on @smu advertises capability @cap. */
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
SMU_13_0_6_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
- SMU_13_0_6_MAX_XCC);
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_13_0_6_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_13_0_6_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_xcd, SMU_13_0_6_MAX_XCC); \
+
DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,