 {
 	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
-	struct amdgpu_userq_mgr *uqm;
 	unsigned long queue_id;
+	/* TODO: We probably need a new lock for the queue state */
 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
-		uqm = queue->userq_mgr;
-		cancel_delayed_work_sync(&uqm->resume_work);
-		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
-			amdgpu_userq_wait_for_last_fence(queue);
-			userq_funcs = adev->userq_funcs[queue->queue_type];
-			userq_funcs->unmap(queue);
-			/* just mark all queues as hung at this point.
-			 * if unmap succeeds, we could map again
-			 * in amdgpu_userq_post_reset() if vram is not lost
-			 */
-			queue->state = AMDGPU_USERQ_STATE_HUNG;
-			amdgpu_userq_fence_driver_force_completion(queue);
-		}
+		if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+			continue;
+
+		userq_funcs = adev->userq_funcs[queue->queue_type];
+		userq_funcs->unmap(queue);
+		/* Just mark all queues as hung at this point. If unmap
+		 * succeeds, we can map them again in amdgpu_userq_post_reset()
+		 * if VRAM is not lost.
+		 */
+		queue->state = AMDGPU_USERQ_STATE_HUNG;
+		amdgpu_userq_fence_driver_force_completion(queue);
 	}
 }
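
For the TODO above, one possible shape for such a lock is sketched below. Everything here is illustrative: the userq_state_lock mutex is hypothetical (not an existing field of struct amdgpu_device), as is the function name. A mutex rather than the xarray's internal xa_lock() spinlock is assumed because the ->unmap() callback may sleep.

/* Sketch only: one way to serialize queue-state transitions during
 * reset. 'userq_state_lock' is a hypothetical per-device mutex, not
 * an existing amdgpu field or the actual fix for the TODO.
 */
static void amdgpu_userq_pre_reset_locked_sketch(struct amdgpu_device *adev)
{
	const struct amdgpu_userq_funcs *userq_funcs;
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;

	/* A mutex (not xa_lock()) is assumed because ->unmap() may sleep. */
	mutex_lock(&adev->userq_state_lock);	/* hypothetical field */
	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
			continue;

		userq_funcs = adev->userq_funcs[queue->queue_type];
		userq_funcs->unmap(queue);
		queue->state = AMDGPU_USERQ_STATE_HUNG;
		amdgpu_userq_fence_driver_force_completion(queue);
	}
	mutex_unlock(&adev->userq_state_lock);
}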
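
For context on the remap path the comment refers to, the sketch below shows roughly what that recovery could look like. It is not the actual amdgpu_userq_post_reset() implementation: the vram_lost parameter, the ->map() counterpart to ->unmap() returning 0 on success, and the function body are all assumptions.

/* Sketch: remap queues that pre-reset marked hung, assuming VRAM
 * survived the reset. 'vram_lost' and the ->map() error handling
 * are assumptions, not the driver's real code.
 */
static void amdgpu_userq_post_reset_sketch(struct amdgpu_device *adev,
					   bool vram_lost)
{
	const struct amdgpu_userq_funcs *userq_funcs;
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;

	if (vram_lost)
		return;	/* queue contents are gone; leave queues hung */

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		if (queue->state != AMDGPU_USERQ_STATE_HUNG)
			continue;

		userq_funcs = adev->userq_funcs[queue->queue_type];
		if (!userq_funcs->map(queue))
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
	}
}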