DECLARE_BITMAP(bits, SMU_FEATURE_MAX);
};
+/*
+ * Helpers for statically initializing struct smu_feature_bits.
+ * SMU_FEATURE_BIT_INIT() maps a feature bit number to the matching
+ * element and position of the bits[] array, so a table only has to
+ * list the feature bits it cares about:
+ *
+ *   static const struct smu_feature_bits example = {
+ *       .bits = {
+ *           SMU_FEATURE_BIT_INIT(5),
+ *           SMU_FEATURE_BIT_INIT(10),
+ *           SMU_FEATURE_BIT_INIT(65),
+ *           SMU_FEATURE_BIT_INIT(100)
+ *       }
+ *   };
+ */
+#define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG)
+#define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG)
+#define SMU_FEATURE_BIT_INIT(bit) \
+ [SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit))
+
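For reference, a minimal user-space sketch (not part of the patch) of how these two helpers split a feature bit number into an array element and a bit position. BITS_PER_LONG is stubbed locally since the kernel headers are not available here. Note that two SMU_FEATURE_BIT_INIT() entries resolving to the same bits[] element are overriding designated initializers for that element, so the later entry replaces the earlier one rather than OR-ing with it.

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's BITS_PER_LONG. */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

#define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG)
#define SMU_FEATURE_BITS_POS(bit)  ((bit) % BITS_PER_LONG)
#define SMU_FEATURE_BIT_INIT(bit) \
	[SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit))

int main(void)
{
	/* Bit 65 lands in bits[1] (pos 1) on a 64-bit build, bits[2] (pos 1) on 32-bit. */
	unsigned long bits[4] = { SMU_FEATURE_BIT_INIT(65) };

	printf("bit 65 -> elem %zu, pos %zu\n",
	       SMU_FEATURE_BITS_ELEM(65), SMU_FEATURE_BITS_POS(65));
	assert(bits[SMU_FEATURE_BITS_ELEM(65)] == 1UL << SMU_FEATURE_BITS_POS(65));
	return 0;
}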
enum smu_feature_list {
SMU_FEATURE_LIST_SUPPORTED,
SMU_FEATURE_LIST_ALLOWED,
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-#define SMC_DPM_FEATURE ( \
- FEATURE_DPM_PREFETCHER_MASK | \
- FEATURE_DPM_GFXCLK_MASK | \
- FEATURE_DPM_UCLK_MASK | \
- FEATURE_DPM_SOCCLK_MASK | \
- FEATURE_DPM_MP0CLK_MASK | \
- FEATURE_DPM_FCLK_MASK | \
- FEATURE_DPM_XGMI_MASK)
+static const struct smu_feature_bits arcturus_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT)
+ }
+};
#define smnPCIE_ESM_CTRL 0x111003D0
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&arcturus_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
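smu_feature_bits_to_arr32() itself is not part of this excerpt; a minimal sketch of what it presumably does, assuming it wraps the kernel's bitmap_to_arr32() (the name and argument order are taken from the call sites, not from a confirmed definition):

#include <linux/bitmap.h>

/*
 * Hypothetical helper: copy the first @nbits of @features into a u32 array,
 * following bitmap_to_arr32() layout (arr[0] = bits 0..31, arr[1] = bits
 * 32..63, and so on).
 */
static inline void smu_feature_bits_to_arr32(const struct smu_feature_bits *features,
					     uint32_t *arr, unsigned int nbits)
{
	bitmap_to_arr32(arr, features->bits, nbits);
}

Under that assumption, feature_mask[0] above holds features 0-31 and feature_mask[1] features 32-63, which is the layout the 64-bit compare relies on.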
static int arcturus_dpm_set_vcn_enable(struct smu_context *smu,
static uint32_t cyan_skillfish_sclk_default;
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+static const struct smu_feature_bits cyan_skillfish_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOC_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT)
+ }
+};
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK,
&cyan_skillfish_sclk_default);
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&cyan_skillfish_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
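The *(uint64_t *)feature_mask read assumes the u32 pair is laid out little-endian and is suitably aligned for a 64-bit load. A small self-contained sketch of an equivalent reconstruction that avoids the pointer cast, shown only as a possible alternative to the pattern used throughout this series:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* feature_mask as filled by an arr32-style copy: index 0 = bits 0..31. */
	uint32_t feature_mask[2] = { 0x0000004eu, 0x00000200u };

	/* Firmware-reported features, e.g. bits 6 and 41 set. */
	uint64_t feature_enabled = (1ULL << 6) | (1ULL << 41);

	/* Rebuild the 64-bit DPM mask explicitly instead of type-punning. */
	uint64_t dpm_mask = ((uint64_t)feature_mask[1] << 32) | feature_mask[0];

	assert(dpm_mask == 0x000002000000004eull);
	assert(!!(feature_enabled & dpm_mask));
	return 0;
}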
static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
#undef pr_info
#undef pr_debug
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
+static const struct smu_feature_bits navi10_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFX_PACE_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT)
+ }
+};
#define SMU_11_0_GFX_BUSY_THRESHOLD 15
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&navi10_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
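An alternative shape for the same check, kept entirely in bitmap space (not what this patch does; the function name here is invented): convert the firmware's 64-bit word into a bitmap and test for overlap with the per-ASIC table.

#include <linux/bitmap.h>

static bool navi10_any_dpm_feature_enabled(uint64_t feature_enabled)
{
	DECLARE_BITMAP(enabled, 64);

	bitmap_from_u64(enabled, feature_enabled);

	return bitmap_intersects(enabled, navi10_dpm_features.bits, 64);
}

This avoids the intermediate u32 array and the 64-bit reload, at the cost of one bitmap conversion.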
static int navi10_get_fan_speed_rpm(struct smu_context *smu,
#undef pr_info
#undef pr_debug
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
+static const struct smu_feature_bits sienna_cichlid_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT)
+ }
+};
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&sienna_cichlid_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+static const struct smu_feature_bits vangogh_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT)
+ }
+};
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&vangogh_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
[smu_feature] = {1, (aldebaran_feature)}
#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_XGMI_BIT) | \
- FEATURE_MASK(FEATURE_DPM_VCN_BIT))
+static const struct smu_feature_bits aldebaran_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATIONS),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN_BIT)
+ }
+};
#define smnPCIE_ESM_CTRL 0x111003D0
{
int ret;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&aldebaran_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
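Because the per-ASIC tables are now genuine bitmaps, a single feature can also be queried with the standard bit helpers; a hypothetical one-liner against the aldebaran table:

#include <linux/bitops.h>

/* Hypothetical helper: does aldebaran's DPM set include XGMI DPM? */
static bool aldebaran_dpm_set_has_xgmi(void)
{
	return test_bit(FEATURE_DPM_XGMI_BIT, aldebaran_dpm_features.bits);
}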
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
+static const struct smu_feature_bits smu_v13_0_0_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT)
+ }
+};
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v13_0_0_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
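The bitmap form also works with the generic iteration helpers; a debug-only sketch (the function name is hypothetical) that walks the smu_v13_0_0 table:

#include <linux/bitops.h>

/* Hypothetical debug aid: log every feature bit tracked by the DPM table. */
static void smu_v13_0_0_dump_dpm_features(struct smu_context *smu)
{
	unsigned long bit;

	for_each_set_bit(bit, smu_v13_0_0_dpm_features.bits, SMU_FEATURE_MAX)
		dev_dbg(smu->adev->dev, "DPM feature bit %lu tracked\n", bit);
}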
static int smu_v13_0_0_system_features_control(struct smu_context *smu,
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 1
-#define FEATURE_MASK(feature) (1ULL << feature)
-
#define SMU_13_0_4_UMD_PSTATE_GFXCLK 938
#define SMU_13_0_4_UMD_PSTATE_SOCCLK 938
#define SMU_13_0_4_UMD_PSTATE_FCLK 1875
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \
- FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+static const struct smu_feature_bits smu_v13_0_4_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT)
+ }
+};
static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v13_0_4_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
#define mmMP1_C2PMSG_33_BASE_IDX 0
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT))
+static const struct smu_feature_bits smu_v13_0_5_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT)
+ }
+};
static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v13_0_5_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int smu_v13_0_5_mode_reset(struct smu_context *smu, int type)
[smu_feature] = { 1, (smu_13_0_6_feature) }
#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE \
- (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
- FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
- FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
- FEATURE_MASK(FEATURE_DPM_VCN))
+static const struct smu_feature_bits smu_v13_0_6_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN)
+ }
+};
#define smnPCIE_ESM_CTRL 0x93D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
{
int ret;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
return smu_v13_0_12_is_dpm_running(smu);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v13_0_6_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
+static const struct smu_feature_bits smu_v13_0_7_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT)
+ }
+};
#define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v13_0_7_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics)
#define SMU_13_0_1_UMD_PSTATE_SOCCLK 678
#define SMU_13_0_1_UMD_PSTATE_FCLK 1800
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+static const struct smu_feature_bits yellow_carp_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT)
+ }
+};
static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&yellow_carp_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int yellow_carp_post_smu_init(struct smu_context *smu)
#define SMU_14_0_4_UMD_PSTATE_GFXCLK 938
#define SMU_14_0_4_UMD_PSTATE_SOCCLK 938
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \
- FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+static const struct smu_feature_bits smu_v14_0_0_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT)
+ }
+};
enum smu_mall_pg_config {
SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v14_0_0_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu,
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
+static const struct smu_feature_bits smu_v14_0_2_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT)
+ }
+};
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v14_0_2_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
#define SMU_15_0_UMD_PSTATE_FCLK 1800
-#define FEATURE_MASK(feature) (1ULL << feature)
-#define SMC_DPM_FEATURE ( \
- FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
- FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
- FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
- FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \
- FEATURE_MASK(FEATURE_NPU_DPM_BIT) | \
- FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
- FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+static const struct smu_feature_bits smu_v15_0_0_dpm_features = {
+ .bits = {
+ SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_NPU_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT),
+ SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT)
+ }
+};
enum smu_mall_pg_config {
SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
{
int ret = 0;
uint64_t feature_enabled;
+ uint32_t feature_mask[2];
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- return !!(feature_enabled & SMC_DPM_FEATURE);
+ smu_feature_bits_to_arr32(&smu_v15_0_0_dpm_features, feature_mask, 64);
+ return !!(feature_enabled & *(uint64_t *)feature_mask);
}
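Presumably the reason for moving from a single u64 mask to DECLARE_BITMAP(bits, SMU_FEATURE_MAX) is to leave room for feature bits at or above 64; a hypothetical table entry (FEATURE_FOO_DPM_BIT and its value are invented for illustration) shows that the same macro covers that case:

/* Hypothetical future feature numbered above the old 64-bit limit. */
#define FEATURE_FOO_DPM_BIT 71

static const struct smu_feature_bits future_asic_dpm_features = {
	.bits = {
		SMU_FEATURE_BIT_INIT(FEATURE_FOO_DPM_BIT)  /* bits[1], pos 7 on 64-bit builds */
	}
};

The is_dpm_running() checks above still consume only the low 64 bits (an arr32 pair), so such a feature would need a wider comparison path.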
static int smu_v15_0_0_set_watermarks_table(struct smu_context *smu,