	 * to be submitted to the queues after the reset is complete.
	 */
	if (!ret) {
+		amdgpu_fence_driver_force_completion(gfx_ring);
		drm_sched_wqueue_start(&gfx_ring->sched);
-		if (adev->sdma.has_page_queue)
+		if (adev->sdma.has_page_queue) {
+			amdgpu_fence_driver_force_completion(page_ring);
			drm_sched_wqueue_start(&page_ring->sched);
+		}
	}
	mutex_unlock(&sdma_instance->engine_reset_mutex);
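The ordering in the hunk above matters: the engine reset discards whatever jobs were in flight on the GFX (and page) ring, so the hardware will never signal their fences on its own. Forcing completion before drm_sched_wqueue_start() makes sure waiters and the scheduler see those fences as signalled by the time new submissions are allowed in. For reference, the helper amounts to roughly the following (paraphrased from drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c, not part of this patch):

void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	/* Pretend the last emitted fence has signalled: write the latest
	 * sequence number to the fence memory and run fence processing.
	 */
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}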
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
-	u32 inst_mask, tmp_mask;
+	u32 inst_mask;
	int i, r;
	inst_mask = 1 << ring->me;
	}
	r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
-	if (r)
-		return r;
-
-	tmp_mask = inst_mask;
-	for_each_inst(i, tmp_mask) {
-		ring = &adev->sdma.instance[i].ring;
-
-		amdgpu_fence_driver_force_completion(ring);
-
-		if (adev->sdma.has_page_queue) {
-			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
-
-			amdgpu_fence_driver_force_completion(page);
-		}
-	}
	return r;
}
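With the per-instance loop removed, the SDMA 4.4.2 restore path just restarts the instance and returns its status; fence signalling now happens once, in the common engine-reset code shown in the first hunk. For clarity, the function after this patch reads roughly as follows (assembled from the hunks above; the unhalt poll between the declarations and sdma_v4_4_2_inst_start(), which still uses 'i', is unchanged and elided):

static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 inst_mask;
	int i, r;

	inst_mask = 1 << ring->me;

	/* ... existing unhalt/poll code elided, unchanged by this patch ... */

	r = sdma_v4_4_2_inst_start(adev, inst_mask, true);

	return r;
}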
	r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-	if (r)
-		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	return 0;
+
+	return r;
}
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
	r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-	if (r)
-		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	return 0;
+
+	return r;
}
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
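The sdma_v5_0 and sdma_v5_2 hunks leave both per-IP reset tails identical: resume the instance, leave RLC safe mode, and propagate the resume status, with no fence handling of their own. Putting the pieces together, the overall engine-reset sequence now looks roughly like the sketch below. The wqueue-stop and the dispatch into the IP-specific reset are not part of the hunks above and are assumptions about the surrounding common code (the placeholder do_ip_specific_reset() is hypothetical); the restart block matches the first hunk:

	mutex_lock(&sdma_instance->engine_reset_mutex);

	/* Quiesce new submissions first (assumed, not shown in the hunks). */
	drm_sched_wqueue_stop(&gfx_ring->sched);
	if (adev->sdma.has_page_queue)
		drm_sched_wqueue_stop(&page_ring->sched);

	/* Hypothetical placeholder for the IP-specific reset/restore, e.g. the
	 * sdma_v4_4_2 / sdma_v5_x paths patched above.
	 */
	ret = do_ip_specific_reset(adev, instance_id);

	if (!ret) {
		/* Signal fences of the jobs the reset discarded... */
		amdgpu_fence_driver_force_completion(gfx_ring);
		/* ...and only then let the scheduler submit new work. */
		drm_sched_wqueue_start(&gfx_ring->sched);
		if (adev->sdma.has_page_queue) {
			amdgpu_fence_driver_force_completion(page_ring);
			drm_sched_wqueue_start(&page_ring->sched);
		}
	}

	mutex_unlock(&sdma_instance->engine_reset_mutex);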