amdgpu_userq_fence_driver_put(userq->fence_drv);
}
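+/* Drop the fence driver references held in fence_drv_array and clear
+ * the count so the references are not dropped twice.
+ */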
+static void
+amdgpu_userq_fence_put_fence_drv_array(struct amdgpu_userq_fence *userq_fence)
+{
+	unsigned long i;
+
+	for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+		amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+	userq_fence->fence_drv_array_count = 0;
+}
+
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
 	struct amdgpu_userq_fence *userq_fence, *tmp;
+	LIST_HEAD(to_be_signaled);
 	struct dma_fence *fence;
 	unsigned long flags;
 	u64 rptr;
-	int i;
 
 	if (!fence_drv)
 		return;
 
 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
 	rptr = amdgpu_userq_fence_read(fence_drv);
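+	/* Find the first fence the hardware has not reached yet. */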
-	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
-		fence = &userq_fence->base;
-
-		if (rptr < fence->seqno)
+	list_for_each_entry(userq_fence, &fence_drv->fences, link) {
+		if (rptr < userq_fence->base.seqno)
 			break;
+	}
 
-		dma_fence_signal(fence);
-
-		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
-			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
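+	/* Move all fences already reached by the hardware onto a private
+	 * list so they can be signaled without holding fence_list_lock.
+	 */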
+	list_cut_before(&to_be_signaled, &fence_drv->fences,
+			&userq_fence->link);
+	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
 
-		list_del(&userq_fence->link);
+	list_for_each_entry_safe(userq_fence, tmp, &to_be_signaled, link) {
+		fence = &userq_fence->base;
+		list_del_init(&userq_fence->link);
+		dma_fence_signal(fence);
+		/* Drop the fence_drv_array references outside fence_list_lock
+		 * to avoid recursive locking.
+		 */
+		amdgpu_userq_fence_put_fence_drv_array(userq_fence);
 		dma_fence_put(fence);
 	}
-	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
 	struct amdgpu_userq_fence_driver *fence_drv;
 	struct dma_fence *fence;
 	unsigned long flags;
+	bool signaled = false;
 	fence_drv = userq->fence_drv;
 	if (!fence_drv)
 	/* Check if hardware has already processed the job */
 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
-	if (!dma_fence_is_signaled(fence))
+	if (!dma_fence_is_signaled(fence)) {
 		list_add_tail(&userq_fence->link, &fence_drv->fences);
-	else
+	} else {
+		signaled = true;
 		dma_fence_put(fence);
-
+	}
 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
 
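+	/* The fence was already signaled by the hardware; drop the
+	 * fence_drv_array references outside fence_list_lock as well.
+	 */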
+	if (signaled)
+		amdgpu_userq_fence_put_fence_drv_array(userq_fence);
+
 	*f = fence;
 
 	return 0;