"UNCAPPED",
};
+/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ *
+ * Checks the state of @adev for access. Returns 0 if the device is
+ * accessible, or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev)
+{
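+ /*
+ * Deny access while a GPU reset is in progress, or while the device is
+ * suspended for reasons other than runtime PM (e.g. system suspend);
+ * runtime-suspended devices can still be resumed by the callers through
+ * amdgpu_pm_get_access().
+ */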
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of @adev for access and, if needed, resumes the device
+ * through the runtime PM API. Returns 0 if the device is accessible, or a
+ * negative error code otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of @adev for access and uses the runtime PM API to
+ * determine whether the device is active. Access is allowed only if the
+ * device is active. Returns 0 if the device is accessible, or a negative
+ * error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev);
+ if (ret)
+ return ret;
+
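+ /*
+ * pm_runtime_get_if_active() takes a usage reference and returns a
+ * positive value only if the device is already active; 0 means the
+ * device is not active and a negative value is an error. Both of the
+ * latter deny access here.
+ */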
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put the device to autosuspend mode after access.
+ * @adev: Target device.
+ *
+ * Marks the device as busy and drops the runtime PM reference, allowing it
+ * to autosuspend. Should be paired with amdgpu_pm_get_access*() calls.
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
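+/*
+ * Usage sketch (illustrative only, mirroring the sysfs handlers converted
+ * below): handlers that must not wake a suspended device pair
+ * amdgpu_pm_get_access_if_active() with amdgpu_pm_put_access(), while
+ * handlers that may resume the device pair amdgpu_pm_get_access() with
+ * amdgpu_pm_put_access():
+ *
+ *     ret = amdgpu_pm_get_access_if_active(adev);
+ *     if (ret)
+ *             return ret;
+ *
+ *     value = amdgpu_dpm_get_sclk_od(adev);
+ *
+ *     amdgpu_pm_put_access(adev);
+ */
+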
/**
* DOC: power_dpm_state
*
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev);
+ if (ret)
+ return ret;
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
tmp_str++;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
+ r = amdgpu_pm_dev_state_check(adev);
+ if (r)
+ return r;
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(ddev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
/* TODO: update bias level with SMU message */
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct amdgpu_pm_policy_attr *policy_attr;
+ int r;
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ r = amdgpu_pm_dev_state_check(adev);
+ if (r)
+ return r;
return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}
char *tmp, *param;
long val;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
count = min(count, sizeof(tmp_buf));
memcpy(tmp_buf, buf, count);
tmp_buf[count - 1] = '\0';
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
else
return -EINVAL;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
else
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_power_limit(adev, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return ret;
}
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(dev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
seq_printf(m, "\n");
out:
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)