 	atomic_set(&adev->reset_domain->reset_res, r);
-	if (!r)
-		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+	if (!r) {
+		struct amdgpu_task_info *ti = NULL;
+
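+		/* if this reset was triggered by a job, look up the task that submitted it */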
+		if (job)
+			ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+				     ti ? &ti->task : NULL);
+
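+		/* drop the reference taken above; ti may still be NULL at this point */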
+		amdgpu_vm_put_task_info(ti);
+	}
 	return r;
 }
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
+	struct drm_wedge_task_info *info = NULL;
 	struct amdgpu_task_info *ti;
 	struct amdgpu_device *adev = ring->adev;
 	int idx;
 	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
 	if (ti) {
 		amdgpu_vm_print_task_info(adev, ti);
-		amdgpu_vm_put_task_info(ti);
+		info = &ti->task;
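+		/* keep the ti reference; info points at the drm_wedge_task_info embedded in it */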
 	}
 	/* attempt a per ring reset */
 			if (amdgpu_ring_sched_ready(ring))
 				drm_sched_start(&ring->sched, 0);
 			dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
-			drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+			drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);
 			goto exit;
 		}
 		dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
 	}
 	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
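+	/* per-ring reset did not succeed; release ti before attempting full GPU recovery */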
+	amdgpu_vm_put_task_info(ti);
+
 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
 		struct amdgpu_reset_context reset_context;
 		memset(&reset_context, 0, sizeof(reset_context));