} else if (amdgpu_gpu_recovery && ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
-
- /*
- * Stop the scheduler to prevent anybody else from touching the
- * ring buffer.
- */
- drm_sched_wqueue_stop(&ring->sched);
-
r = amdgpu_ring_reset(ring, job->vmid, NULL);
if (!r) {
atomic_inc(&ring->adev->gpu_reset_counter);
- drm_sched_wqueue_start(&ring->sched);
dev_err(adev->dev, "Ring %s reset succeeded\n",
ring->sched.name);
drm_dev_wedged_event(adev_to_drm(adev),
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
- * This ensures that no new tasks are submitted to the queues while
- * the reset is in progress.
- */
+ /* Stop the scheduler's work queues for the GFX and page rings.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
- drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
+ drm_sched_wqueue_stop(&gfx_ring->sched);
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
+ if (adev->sdma.has_page_queue)
drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
- }
if (sdma_instance->funcs->stop_kernel_queue) {
sdma_instance->funcs->stop_kernel_queue(gfx_ring);
* to be submitted to the queues after the reset is complete.
*/
if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
- drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ if (adev->sdma.has_page_queue)
drm_sched_wqueue_start(&page_ring->sched);
- }
}
mutex_unlock(&sdma_instance->engine_reset_mutex);
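With the conditional bookkeeping gone, the SDMA engine reset reduces to: stop both schedulers, reset, restart on success, all under engine_reset_mutex. A minimal sketch of the resulting flow, assuming the amdgpu internals shown above; reset_sdma_hw() is a hypothetical stand-in for the instance's stop_kernel_queue/reset/start_kernel_queue sequence, and error logging is elided:

#include "amdgpu.h"
#include <drm/gpu_scheduler.h>

/* Sketch only: reset_sdma_hw() is a hypothetical placeholder for the
 * HW-specific reset sequence of this SDMA instance.
 */
static int sdma_engine_reset_sketch(struct amdgpu_device *adev, u32 instance_id)
{
	struct amdgpu_sdma_instance *sdma = &adev->sdma.instance[instance_id];
	int ret;

	mutex_lock(&sdma->engine_reset_mutex);

	/* Block new job submission on both rings for the whole reset. */
	drm_sched_wqueue_stop(&sdma->ring.sched);
	if (adev->sdma.has_page_queue)
		drm_sched_wqueue_stop(&sdma->page.sched);

	ret = reset_sdma_hw(adev, instance_id);

	/* Resume submission only if the engine came back. */
	if (!ret) {
		drm_sched_wqueue_start(&sdma->ring.sched);
		if (adev->sdma.has_page_queue)
			drm_sched_wqueue_start(&sdma->page.sched);
	}

	mutex_unlock(&sdma->engine_reset_mutex);
	return ret;
}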
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
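Each per-queue reset callback below follows the same bracketing pattern: stop the ring's scheduler workqueue so no new jobs are picked up, run the HW-specific queue reset (KIQ unmap/map, MES legacy-queue reset, or a block stop/start), re-test the ring, force-complete the fences still outstanding on it, and only then restart the workqueue. On failure the workqueue is left stopped and the error is returned so the caller can escalate to a full adapter reset. A minimal sketch of that shape, with a hypothetical reset_queue_hw() standing in for the per-IP backend:

/* Generic shape of the per-queue reset callbacks; reset_queue_hw() is a
 * hypothetical placeholder for the HW-specific step.
 */
static int ring_reset_sketch(struct amdgpu_ring *ring, unsigned int vmid)
{
	int r;

	/* Keep the scheduler from feeding the ring while it is being reset. */
	drm_sched_wqueue_stop(&ring->sched);

	r = reset_queue_hw(ring, vmid);
	if (r)
		return r;	/* wqueue stays stopped; caller falls back to full reset */

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	/* Retire the fences still pending on the ring, then reopen the queue. */
	amdgpu_fence_driver_force_completion(ring);
	drm_sched_wqueue_start(&ring->sched);
	return 0;
}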
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
{
int r;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v2_0_stop(ring->adev);
jpeg_v2_0_start(ring->adev);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
{
int r;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v2_5_stop_inst(ring->adev, ring->me);
jpeg_v2_5_start_inst(ring->adev, ring->me);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
{
int r;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v3_0_stop(ring->adev);
jpeg_v3_0_start(ring->adev);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(ring->adev))
return -EINVAL;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v4_0_stop(ring->adev);
jpeg_v4_0_start(ring->adev);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v4_0_3_core_stall_reset(ring);
jpeg_v4_0_3_start_jrbc(ring);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
jpeg_v5_0_1_core_stall_reset(ring);
jpeg_v5_0_1_init_jrbc(ring);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
return -EINVAL;
}
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
return -EINVAL;
}
+ drm_sched_wqueue_stop(&ring->sched);
+
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
vcn_v4_0_stop(vinst);
vcn_v4_0_start(vinst);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
+
vcn_inst = GET_INST(VCN, ring->me);
r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
vcn_v4_0_5_stop(vinst);
vcn_v4_0_5_start(vinst);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
return -EOPNOTSUPP;
+ drm_sched_wqueue_stop(&ring->sched);
vcn_v5_0_0_stop(vinst);
vcn_v5_0_0_start(vinst);
if (r)
return r;
amdgpu_fence_driver_force_completion(ring);
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}