static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
- struct smu_feature *feature = &smu->smu_feature;
uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
int ret = 0;
* such scenario.
*/
if (smu->adev->scpm_enabled) {
- bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
- bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
SMU_FEATURE_MAX/32);
if (ret)
return ret;
- bitmap_or(feature->allowed, feature->allowed,
- (unsigned long *)allowed_feature_mask,
- feature->feature_num);
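+ /* Merge the queried allowed mask into the ALLOWED list */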
+ smu_feature_list_add_bits(smu, SMU_FEATURE_LIST_ALLOWED,
+ (unsigned long *)allowed_feature_mask);
return ret;
}
int i, ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
- smu->smu_feature.feature_num = SMU_FEATURE_MAX;
- bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
- bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+ smu_feature_init(smu, SMU_FEATURE_MAX);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
static int smu_smc_hw_setup(struct smu_context *smu)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
uint8_t pcie_gen = 0, pcie_width = 0;
uint64_t features_supported;
dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
return ret;
}
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
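+ /* Cache the retrieved supported features in the SUPPORTED list */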
+ smu_feature_list_set_bits(smu, SMU_FEATURE_LIST_SUPPORTED,
+ (unsigned long *)&features_supported);
if (!smu_is_dpm_running(smu))
dev_info(adev->dev, "dpm has been disabled\n");
DECLARE_BITMAP(bits, SMU_FEATURE_MAX);
};
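+/*
+ * Feature state is tracked in two lists: the features the SMU firmware
+ * reports as supported and the features the driver allows to be enabled.
+ * The smu_feature_list_*() helpers operate on these lists, e.g.
+ * (illustrative only, feature_id being a bit index below SMU_FEATURE_MAX):
+ *
+ *	smu_feature_init(smu, SMU_FEATURE_MAX);
+ *	smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_SUPPORTED, feature_id);
+ *	if (smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED, feature_id))
+ *		...
+ */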
+enum smu_feature_list {
+ SMU_FEATURE_LIST_SUPPORTED,
+ SMU_FEATURE_LIST_ALLOWED,
+ SMU_FEATURE_LIST_MAX,
+};
+
struct smu_feature {
uint32_t feature_num;
- DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
- DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
+ struct smu_feature_bits bits[SMU_FEATURE_LIST_MAX];
};
struct smu_clocks {
bitmap_or(dst->bits, src1->bits, src2, nbits);
}
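+/*
+ * Map a feature list id onto its backing bitmap. An out-of-range id is
+ * reported with a warning and falls back to the SUPPORTED list so that
+ * callers always get a valid pointer.
+ */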
+static inline struct smu_feature_bits *
+__smu_feature_get_list(struct smu_context *smu, enum smu_feature_list list)
+{
+ if (unlikely(list >= SMU_FEATURE_LIST_MAX)) {
+ dev_warn(smu->adev->dev, "Invalid feature list: %d\n", list);
+ return &smu->smu_feature.bits[SMU_FEATURE_LIST_SUPPORTED];
+ }
+
+ return &smu->smu_feature.bits[list];
+}
+
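+/*
+ * Single-bit accessors. Bit indices at or above feature_num are out of
+ * range: is_set() returns false for them, set/clear are no-ops.
+ */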
+static inline bool smu_feature_list_is_set(struct smu_context *smu,
+ enum smu_feature_list list,
+ unsigned int bit)
+{
+ if (bit >= smu->smu_feature.feature_num)
+ return false;
+
+ return smu_feature_bits_is_set(__smu_feature_get_list(smu, list), bit);
+}
+
+static inline void smu_feature_list_set_bit(struct smu_context *smu,
+ enum smu_feature_list list,
+ unsigned int bit)
+{
+ if (bit >= smu->smu_feature.feature_num)
+ return;
+
+ smu_feature_bits_set_bit(__smu_feature_get_list(smu, list), bit);
+}
+
+static inline void smu_feature_list_clear_bit(struct smu_context *smu,
+ enum smu_feature_list list,
+ unsigned int bit)
+{
+ if (bit >= smu->smu_feature.feature_num)
+ return;
+
+ smu_feature_bits_clear_bit(__smu_feature_get_list(smu, list), bit);
+}
+
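+/*
+ * Fill or clear a whole feature list, e.g. the SCPM path in
+ * smu_get_driver_allowed_feature_mask() marks every feature as allowed.
+ */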
+static inline void smu_feature_list_set_all(struct smu_context *smu,
+ enum smu_feature_list list)
+{
+ smu_feature_bits_fill(__smu_feature_get_list(smu, list));
+}
+
+static inline void smu_feature_list_clear_all(struct smu_context *smu,
+ enum smu_feature_list list)
+{
+ smu_feature_bits_clearall(__smu_feature_get_list(smu, list));
+}
+
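+/* Return true when none of the first feature_num bits of the list are set. */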
+static inline bool smu_feature_list_is_empty(struct smu_context *smu,
+ enum smu_feature_list list)
+{
+ return smu_feature_bits_empty(__smu_feature_get_list(smu, list),
+ smu->smu_feature.feature_num);
+}
+
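+/* Load the first feature_num bits of a raw bitmap into the list. */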
+static inline void smu_feature_list_set_bits(struct smu_context *smu,
+ enum smu_feature_list dst_list,
+ const unsigned long *src)
+{
+ smu_feature_bits_copy(__smu_feature_get_list(smu, dst_list), src,
+ smu->smu_feature.feature_num);
+}
+
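+/* OR the first feature_num bits of a raw bitmap into the list. */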
+static inline void smu_feature_list_add_bits(struct smu_context *smu,
+ enum smu_feature_list list,
+ const unsigned long *src)
+{
+ struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);
+
+ smu_feature_bits_or(bits, bits, src, smu->smu_feature.feature_num);
+}
+
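+/*
+ * Export a feature list as an array of u32 words, e.g. to build the
+ * SetAllowedFeaturesMask message arguments. The array must be able to
+ * hold at least feature_num bits.
+ */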
+static inline void smu_feature_list_to_arr32(struct smu_context *smu,
+ enum smu_feature_list list,
+ uint32_t *arr)
+{
+ smu_feature_bits_to_arr32(__smu_feature_get_list(smu, list), arr,
+ smu->smu_feature.feature_num);
+}
+
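+/*
+ * Record the number of valid feature bits and start with both lists
+ * cleared. A zero feature_num or a repeated call is ignored.
+ */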
+static inline void smu_feature_init(struct smu_context *smu, int feature_num)
+{
+ if (!feature_num || smu->smu_feature.feature_num != 0)
+ return;
+
+ smu->smu_feature.feature_num = feature_num;
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_SUPPORTED);
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
+}
+
#endif
int ret = 0;
uint32_t feature_mask[2];
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
+ if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) ||
+ feature->feature_num < 64) {
ret = -EINVAL;
goto failed;
}
- bitmap_to_arr32(feature_mask, feature->allowed, 64);
+ smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
int ret = 0;
uint32_t feature_mask[2];
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) ||
feature->feature_num < 64)
return -EINVAL;
- bitmap_to_arr32(feature_mask, feature->allowed, 64);
+ smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
int ret = 0;
uint32_t feature_mask[2];
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) ||
feature->feature_num < 64)
return -EINVAL;
- bitmap_to_arr32(feature_mask, feature->allowed, 64);
+ smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
int ret = 0;
uint32_t feature_mask[2];
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) ||
feature->feature_num < 64)
return -EINVAL;
- bitmap_to_arr32(feature_mask, feature->allowed, 64);
+ smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
int smu_cmn_feature_is_supported(struct smu_context *smu,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
if (feature_id < 0)
return 0;
- WARN_ON(feature_id > feature->feature_num);
-
- return test_bit(feature_id, feature->supported);
+ return smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED,
+ feature_id);
}
static int __smu_get_enabled_features(struct smu_context *smu,