- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw ip
*
*/
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
u32 val = 0;
u32 count = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
if (!adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw ip
*
*/
-static int isp_hw_fini(void *handle)
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_isp *isp = &adev->isp;
+ struct amdgpu_isp *isp = &ip_block->adev->isp;
if (isp->funcs->hw_fini != NULL)
return isp->funcs->hw_fini(isp);
return -EINVAL;
}
-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (psp->ta_fw) {
return 0;
}
-static int umsch_mm_hw_fini(void *handle)
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
umsch_mm_ring_stop(&adev->umsch_mm);
static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return umsch_mm_hw_fini(adev);
+ return umsch_mm_hw_fini(ip_block);
}
static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int amdgpu_vkms_hw_fini(void *handle)
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
r = drm_mode_config_helper_suspend(adev_to_drm(adev));
if (r)
return r;
- return amdgpu_vkms_hw_fini(adev);
+ return amdgpu_vkms_hw_fini(ip_block);
}
static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int vpe_hw_fini(void *handle)
+static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
vpe_ring_stop(vpe);
cancel_delayed_work_sync(&adev->vpe.idle_work);
- return vpe_hw_fini(adev);
+ return vpe_hw_fini(ip_block);
}
static int vpe_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cik_common_hw_fini(void *handle)
+static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
static int cik_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return cik_common_hw_fini(adev);
+ return cik_common_hw_fini(ip_block);
}
static int cik_common_resume(struct amdgpu_ip_block *ip_block)
return cik_ih_irq_init(adev);
}
-static int cik_ih_hw_fini(void *handle)
+static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_ih_irq_disable(adev);
+ cik_ih_irq_disable(ip_block->adev);
return 0;
}
static int cik_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return cik_ih_hw_fini(adev);
+ return cik_ih_hw_fini(ip_block);
}
static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return cik_sdma_hw_fini(adev);
+ return cik_sdma_hw_fini(ip_block);
}
static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cz_ih_hw_fini(void *handle)
+static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cz_ih_irq_disable(adev);
+ cz_ih_irq_disable(ip_block->adev);
return 0;
}
static int cz_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return cz_ih_hw_fini(adev);
+ return cz_ih_hw_fini(ip_block);
}
static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int dce_v10_0_hw_fini(void *handle)
+static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_hpd_fini(adev);
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v10_0_hw_fini(adev);
+ return dce_v10_0_hw_fini(ip_block);
}
static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int dce_v11_0_hw_fini(void *handle)
+static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v11_0_hpd_fini(adev);
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v11_0_hw_fini(adev);
+ return dce_v11_0_hw_fini(ip_block);
}
static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int dce_v6_0_hw_fini(void *handle)
+static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v6_0_hpd_fini(adev);
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v6_0_hw_fini(adev);
+ return dce_v6_0_hw_fini(ip_block);
}
static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int dce_v8_0_hw_fini(void *handle)
+static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v8_0_hpd_fini(adev);
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v8_0_hw_fini(adev);
+ return dce_v8_0_hw_fini(ip_block);
}
static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v10_0_hw_fini(void *handle)
+static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
* otherwise disallowing gfxoff will fail to take effect.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
- gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+ gfx_v10_0_set_powergating_state(adev, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v10_0_hw_fini(adev);
+ return gfx_v10_0_hw_fini(ip_block);
}
static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v11_0_hw_fini(void *handle)
+static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v11_0_hw_fini(adev);
+ return gfx_v11_0_hw_fini(ip_block);
}
static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v12_0_hw_fini(void *handle)
+static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v12_0_hw_fini(adev);
+ return gfx_v12_0_hw_fini(ip_block);
}
static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v6_0_hw_fini(void *handle)
+static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v6_0_cp_enable(adev, false);
adev->gfx.rlc.funcs->stop(adev);
static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v6_0_hw_fini(adev);
+ return gfx_v6_0_hw_fini(ip_block);
}
static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v7_0_hw_fini(void *handle)
+static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v7_0_hw_fini(adev);
+ return gfx_v7_0_hw_fini(ip_block);
}
static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ip_block *ip_block;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
- if (!ip_block)
- return -EINVAL;
-
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (!gfx_v8_0_wait_for_idle(ip_block))
gfx_v8_0_cp_enable(adev, false);
static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v8_0_hw_fini(adev);
+ return gfx_v8_0_hw_fini(ip_block);
}
static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v9_0_hw_fini(void *handle)
+static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v9_0_hw_fini(adev);
+ return gfx_v9_0_hw_fini(ip_block);
}
static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int gfx_v9_4_3_hw_fini(void *handle)
+static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gfx_v9_4_3_hw_fini(adev);
+ return gfx_v9_4_3_hw_fini(ip_block);
}
static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v10_0_hw_fini(void *handle)
+static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v10_0_gart_disable(adev);
static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v10_0_hw_fini(adev);
+ gmc_v10_0_hw_fini(ip_block);
return 0;
}
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v11_0_hw_fini(void *handle)
+static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v11_0_hw_fini(adev);
+ gmc_v11_0_hw_fini(ip_block);
return 0;
}
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v12_0_hw_fini(void *handle)
+static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v12_0_hw_fini(adev);
+ gmc_v12_0_hw_fini(ip_block);
return 0;
}
return 0;
}
-static int gmc_v6_0_hw_fini(void *handle)
+static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v6_0_hw_fini(adev);
+ gmc_v6_0_hw_fini(ip_block);
return 0;
}
return 0;
}
-static int gmc_v7_0_hw_fini(void *handle)
+static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v7_0_hw_fini(adev);
+ gmc_v7_0_hw_fini(ip_block);
return 0;
}
return 0;
}
-static int gmc_v8_0_hw_fini(void *handle)
+static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v8_0_gart_disable(adev);
static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- gmc_v8_0_hw_fini(adev);
+ gmc_v8_0_hw_fini(ip_block);
return 0;
}
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v9_0_hw_fini(void *handle)
+static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v9_0_gart_disable(adev);
static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return gmc_v9_0_hw_fini(adev);
+ return gmc_v9_0_hw_fini(ip_block);
}
static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
return iceland_ih_irq_init(adev);
}
-static int iceland_ih_hw_fini(void *handle)
+static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_ih_irq_disable(adev);
+ iceland_ih_irq_disable(ip_block->adev);
return 0;
}
static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return iceland_ih_hw_fini(adev);
+ return iceland_ih_hw_fini(ip_block);
}
static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int ih_v6_0_hw_fini(void *handle)
+static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_0_irq_disable(adev);
+ ih_v6_0_irq_disable(ip_block->adev);
return 0;
}
static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return ih_v6_0_hw_fini(adev);
+ return ih_v6_0_hw_fini(ip_block);
}
static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int ih_v6_1_hw_fini(void *handle)
+static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_1_irq_disable(adev);
+ ih_v6_1_irq_disable(ip_block->adev);
return 0;
}
static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return ih_v6_1_hw_fini(adev);
+ return ih_v6_1_hw_fini(ip_block);
}
static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int ih_v7_0_hw_fini(void *handle)
+static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v7_0_irq_disable(adev);
+ ih_v7_0_irq_disable(ip_block->adev);
return 0;
}
static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return ih_v7_0_hw_fini(adev);
+ return ih_v7_0_hw_fini(ip_block);
}
static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_0_hw_fini(void *handle)
+static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v2_0_hw_fini(adev);
+ r = jpeg_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
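The JPEG suspend handlers above all share one ordering: quiesce the hardware first, then let the generic helper save state, dereferencing adev only where the helper is still adev-based. A condensed sketch of that idiom; the function name is invented for the illustration:

static int jpeg_suspend_sketch(struct amdgpu_ip_block *ip_block)	/* hypothetical name */
{
	int r;

	r = jpeg_v2_0_hw_fini(ip_block);	/* stop the block first */
	if (r)
		return r;

	/* The generic helper still takes adev, hence the deref. */
	return amdgpu_jpeg_suspend(ip_block->adev);
}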
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_5_hw_fini(void *handle)
+static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v2_5_hw_fini(adev);
+ r = jpeg_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v3_0_hw_fini(void *handle)
+static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v3_0_hw_fini(adev);
+ r = jpeg_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_hw_fini(void *handle)
+static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (!amdgpu_sriov_vf(adev)) {
*/
static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v4_0_hw_fini(adev);
+ r = jpeg_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_3_hw_fini(void *handle)
+static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
cancel_delayed_work_sync(&adev->jpeg.idle_work);
*/
static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v4_0_3_hw_fini(adev);
+ r = jpeg_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_5_hw_fini(void *handle)
+static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v4_0_5_hw_fini(adev);
+ r = jpeg_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v5_0_0_hw_fini(void *handle)
+static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = jpeg_v5_0_0_hw_fini(adev);
+ r = jpeg_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
-static int mes_v11_0_hw_fini(void *handle);
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
adev->mes.enable_legacy_queue_map = false;
if (adev->mes.enable_legacy_queue_map) {
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
- if (unlikely(!ip_block)) {
- dev_err(adev->dev, "Failed to get MES handle\n");
- return -EINVAL;
- }
-
r = mes_v11_0_hw_init(ip_block);
if (r)
goto failure;
return r;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
return 0;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
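The two failure-label hunks above work because the MES ip_block lookup is hoisted ahead of queue initialization: every exit path, including the goto, now has a valid block to hand to mes_v11_0_hw_fini(). A condensed control-flow sketch, with an invented function name and the surrounding setup elided:

static int mes_v11_0_kiq_hw_init_sketch(struct amdgpu_device *adev)	/* hypothetical name */
{
	struct amdgpu_ip_block *ip_block;
	int r;

	/* Resolve the block first; both the legacy-queue path and the
	 * failure path below need it.
	 */
	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
	if (unlikely(!ip_block)) {
		dev_err(adev->dev, "Failed to get MES handle\n");
		return -EINVAL;
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
	if (r)
		goto failure;

	if (adev->mes.enable_legacy_queue_map) {
		r = mes_v11_0_hw_init(ip_block);
		if (r)
			goto failure;
	}

	return 0;

failure:
	mes_v11_0_hw_fini(ip_block);	/* receives the block, not adev */
	return r;
}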
-static int mes_v11_0_hw_fini(void *handle)
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_is_mes_info_enable(adev)) {
amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
&adev->mes.resource_1_addr);
static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v11_0_hw_fini(adev);
+ return mes_v11_0_hw_fini(ip_block);
}
static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");
static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
-static int mes_v12_0_hw_fini(void *handle);
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
mes_v12_0_enable(adev, true);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
}
if (adev->mes.enable_legacy_queue_map) {
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
- if (unlikely(!ip_block)) {
- dev_err(adev->dev, "Failed to get MES handle\n");
- return -EINVAL;
- }
-
r = mes_v12_0_hw_init(ip_block);
if (r)
goto failure;
return r;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
return 0;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
-static int mes_v12_0_hw_fini(void *handle)
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v12_0_hw_fini(adev);
+ return mes_v12_0_hw_fini(ip_block);
}
static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
return navi10_ih_irq_init(adev);
}
-static int navi10_ih_hw_fini(void *handle)
+static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- navi10_ih_irq_disable(adev);
+ navi10_ih_irq_disable(ip_block->adev);
return 0;
}
static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return navi10_ih_hw_fini(adev);
+ return navi10_ih_hw_fini(ip_block);
}
static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int nv_common_hw_fini(void *handle)
+static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because nv_enable_doorbell_aperture
static int nv_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return nv_common_hw_fini(adev);
+ return nv_common_hw_fini(ip_block);
}
static int nv_common_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int sdma_v2_4_hw_fini(void *handle)
+static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- sdma_v2_4_enable(adev, false);
+ sdma_v2_4_enable(ip_block->adev, false);
return 0;
}
static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v2_4_hw_fini(adev);
+ return sdma_v2_4_hw_fini(ip_block);
}
static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int sdma_v3_0_hw_fini(void *handle)
+static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v3_0_hw_fini(adev);
+ return sdma_v3_0_hw_fini(ip_block);
}
static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_0_start(adev);
}
-static int sdma_v4_0_hw_fini(void *handle)
+static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
if (amdgpu_sriov_vf(adev))
return 0;
}
- return sdma_v4_0_hw_fini(adev);
+ return sdma_v4_0_hw_fini(ip_block);
}
static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int sdma_v4_4_2_hw_fini(void *handle)
+static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
int i;
if (amdgpu_in_reset(adev))
sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
- return sdma_v4_4_2_hw_fini(adev);
+ return sdma_v4_4_2_hw_fini(ip_block);
}
static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int sdma_v5_0_hw_fini(void *handle)
+static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v5_0_hw_fini(adev);
+ return sdma_v5_0_hw_fini(ip_block);
}
static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_2_start(adev);
}
-static int sdma_v5_2_hw_fini(void *handle)
+static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
static int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v5_2_hw_fini(adev);
+ return sdma_v5_2_hw_fini(ip_block);
}
static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v6_0_start(adev);
}
-static int sdma_v6_0_hw_fini(void *handle)
+static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v6_0_hw_fini(adev);
+ return sdma_v6_0_hw_fini(ip_block);
}
static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v7_0_start(adev);
}
-static int sdma_v7_0_hw_fini(void *handle)
+static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return sdma_v7_0_hw_fini(adev);
+ return sdma_v7_0_hw_fini(ip_block);
}
static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int si_common_hw_fini(void *handle)
+static int si_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
static int si_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return si_common_hw_fini(adev);
+ return si_common_hw_fini(ip_block);
}
static int si_common_resume(struct amdgpu_ip_block *ip_block)
return si_dma_start(adev);
}
-static int si_dma_hw_fini(void *handle)
+static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_dma_stop(adev);
+ si_dma_stop(ip_block->adev);
return 0;
}
static int si_dma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return si_dma_hw_fini(adev);
+ return si_dma_hw_fini(ip_block);
}
static int si_dma_resume(struct amdgpu_ip_block *ip_block)
return si_ih_irq_init(adev);
}
-static int si_ih_hw_fini(void *handle)
+static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_ih_irq_disable(adev);
+ si_ih_irq_disable(ip_block->adev);
return 0;
}
static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return si_ih_hw_fini(adev);
+ return si_ih_hw_fini(ip_block);
}
static int si_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int soc15_common_hw_fini(void *handle)
+static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc15_enable_doorbell_aperture
static int soc15_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return soc15_common_hw_fini(adev);
+ return soc15_common_hw_fini(ip_block);
}
static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int soc21_common_hw_fini(void *handle)
+static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc21_enable_doorbell_aperture
static int soc21_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return soc21_common_hw_fini(adev);
+ return soc21_common_hw_fini(ip_block);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
return 0;
}
-static int soc24_common_hw_fini(void *handle)
+static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc24_enable_doorbell_aperture
static int soc24_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return soc24_common_hw_fini(adev);
+ return soc24_common_hw_fini(ip_block);
}
static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int tonga_ih_hw_fini(void *handle)
+static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_ih_irq_disable(adev);
+ tonga_ih_irq_disable(ip_block->adev);
return 0;
}
static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- if (!adev->irq.srbm_soft_reset)
+ if (!ip_block->adev->irq.srbm_soft_reset)
return 0;
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block)
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v3_1_hw_fini(void *handle)
+static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
AMD_CG_STATE_GATE);
}
- r = uvd_v3_1_hw_fini(adev);
+ r = uvd_v3_1_hw_fini(ip_block);
if (r)
return r;
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v4_2_hw_fini(void *handle)
+static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
AMD_CG_STATE_GATE);
}
- r = uvd_v4_2_hw_fini(adev);
+ r = uvd_v4_2_hw_fini(ip_block);
if (r)
return r;
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v5_0_hw_fini(void *handle)
+static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
AMD_CG_STATE_GATE);
}
- r = uvd_v5_0_hw_fini(adev);
+ r = uvd_v5_0_hw_fini(ip_block);
if (r)
return r;
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v6_0_hw_fini(void *handle)
+static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
AMD_CG_STATE_GATE);
}
- r = uvd_v6_0_hw_fini(adev);
+ r = uvd_v6_0_hw_fini(ip_block);
if (r)
return r;
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v7_0_hw_fini(void *handle)
+static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
AMD_CG_STATE_GATE);
}
- r = uvd_v7_0_hw_fini(adev);
+ r = uvd_v7_0_hw_fini(ip_block);
if (r)
return r;
return 0;
}
-static int vce_v2_0_hw_fini(void *handle)
+static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cancel_delayed_work_sync(&adev->vce.idle_work);
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
return 0;
}
AMD_CG_STATE_GATE);
}
- r = vce_v2_0_hw_fini(adev);
+ r = vce_v2_0_hw_fini(ip_block);
if (r)
return r;
return 0;
}
-static int vce_v3_0_hw_fini(void *handle)
+static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ip_block *ip_block;
-
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
AMD_CG_STATE_GATE);
}
- r = vce_v3_0_hw_fini(adev);
+ r = vce_v3_0_hw_fini(ip_block);
if (r)
return r;
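Beyond dropping the cast, the vce_v3_0 hunk above (like gfx_v8_0 and vce_v4_0) deletes a self-lookup: under the old signature the callback had to reconstruct its own IP block from the opaque handle, and could even fail doing so. A minimal before/after sketch; the _old/_new suffixes exist only for this illustration and the teardown bodies are elided:

/* Old shape: recover the typed block from the untyped handle. */
static int vce_v3_0_hw_fini_old(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
	if (!ip_block)
		return -EINVAL;
	/* ... teardown using adev and ip_block ... */
	return 0;
}

/* New shape: the block is the parameter, so the lookup and its
 * -EINVAL path disappear.
 */
static int vce_v3_0_hw_fini_new(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* ... teardown using adev and ip_block ... */
	return 0;
}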
return 0;
}
-static int vce_v4_0_hw_fini(void *handle)
+static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ip_block *ip_block;
-
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
AMD_CG_STATE_GATE);
}
- r = vce_v4_0_hw_fini(adev);
+ r = vce_v4_0_hw_fini(ip_block);
if (r)
return r;
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v1_0_hw_fini(void *handle)
+static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
amdgpu_dpm_enable_uvd(adev, false);
}
- r = vcn_v1_0_hw_fini(adev);
+ r = vcn_v1_0_hw_fini(ip_block);
if (r)
return r;
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_0_hw_fini(void *handle)
+static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v2_0_hw_fini(adev);
+ r = vcn_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_5_hw_fini(void *handle)
+static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v2_5_hw_fini(adev);
+ r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v3_0_hw_fini(void *handle)
+static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v3_0_hw_fini(adev);
+ r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_hw_fini(void *handle)
+static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v4_0_hw_fini(adev);
+ r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_3_hw_fini(void *handle)
+static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
*/
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
int r;
- r = vcn_v4_0_3_hw_fini(adev);
+ r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_5_hw_fini(void *handle)
+static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v4_0_5_hw_fini(adev);
+ r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v5_0_0_hw_fini(void *handle)
+static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = ip_block->adev;
- r = vcn_v5_0_0_hw_fini(adev);
+ r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
return vega10_ih_irq_init(ip_block->adev);
}
-static int vega10_ih_hw_fini(void *handle)
+static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega10_ih_irq_disable(adev);
+ vega10_ih_irq_disable(ip_block->adev);
return 0;
}
static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return vega10_ih_hw_fini(adev);
+ return vega10_ih_hw_fini(ip_block);
}
static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int vega20_ih_hw_fini(void *handle)
+static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega20_ih_irq_disable(adev);
+ vega20_ih_irq_disable(ip_block->adev);
return 0;
}
static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return vega20_ih_hw_fini(adev);
+ return vega20_ih_hw_fini(ip_block);
}
static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int vi_common_hw_fini(void *handle)
+static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* disable the doorbell aperture */
vi_enable_doorbell_aperture(adev, false);
static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- return vi_common_hw_fini(adev);
+ return vi_common_hw_fini(ip_block);
}
static int vi_common_resume(struct amdgpu_ip_block *ip_block)
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
* were loaded. Also flush IRQ workqueues and disable them.
*/
-static int dm_hw_fini(void *handle)
+static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_dm_hpd_fini(adev);
int (*sw_fini)(struct amdgpu_ip_block *ip_block);
int (*early_fini)(struct amdgpu_ip_block *ip_block);
int (*hw_init)(struct amdgpu_ip_block *ip_block);
- int (*hw_fini)(void *handle);
+ int (*hw_fini)(struct amdgpu_ip_block *ip_block);
void (*late_fini)(struct amdgpu_ip_block *ip_block);
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
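With the amd_ip_funcs member retyped as above, every implementation and forwarder in this series follows the same two idioms. A minimal sketch under an invented "foo" IP block (nothing named foo_* exists in the patch):

static int foo_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;	/* typed, no cast */

	foo_stop(adev);		/* hypothetical teardown helper */
	return 0;
}

static int foo_suspend(struct amdgpu_ip_block *ip_block)
{
	/* Forward the block itself; the old two-step of fetching adev
	 * just to pass it back into hw_fini is gone.
	 */
	return foo_hw_fini(ip_block);
}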
return ret;
}
-static int kv_dpm_hw_fini(void *handle)
+static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
return ret;
}
-static int si_dpm_hw_fini(void *handle)
+static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
return ret;
}
-static int pp_hw_fini(void *handle)
+static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
return ret;
}
-static int smu_hw_fini(void *handle)
+static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (!ip_block)
return -EINVAL;
- ret = smu_hw_fini(adev);
+ ret = smu_hw_fini(ip_block);
if (ret)
return ret;