Some drivers use fence->ops to test whether a fence was initialized or
not. The problem is that this relies on internal behavior of the
dma_fence implementation.
So better abstract that into a function.
v2: use a flag instead of testing fence->ops, rename the function, move
to the beginning of the patch set.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/r/20260120105655.7134-2-christian.koenig@amd.com
fence->lock = lock;
fence->context = context;
fence->seqno = seqno;
- fence->flags = flags;
+ fence->flags = flags | BIT(DMA_FENCE_FLAG_INITIALIZED_BIT);
fence->error = 0;
trace_dma_fence_init(fence);
unsigned i;
/* Check if any fences were initialized */
- if (job->base.s_fence && job->base.s_fence->finished.ops)
+ if (job->base.s_fence &&
+ dma_fence_was_initialized(&job->base.s_fence->finished))
f = &job->base.s_fence->finished;
- else if (job->hw_fence && job->hw_fence->base.ops)
+ else if (dma_fence_was_initialized(&job->hw_fence->base))
f = &job->hw_fence->base;
else
f = NULL;
amdgpu_sync_free(&job->explicit_sync);
- if (job->hw_fence->base.ops)
+ if (dma_fence_was_initialized(&job->hw_fence->base))
dma_fence_put(&job->hw_fence->base);
else
kfree(job->hw_fence);
- if (job->hw_vm_fence->base.ops)
+ if (dma_fence_was_initialized(&job->hw_vm_fence->base))
dma_fence_put(&job->hw_vm_fence->base);
else
kfree(job->hw_vm_fence);
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (job->hw_fence->base.ops)
+ if (dma_fence_was_initialized(&job->hw_fence->base))
dma_fence_put(&job->hw_fence->base);
else
kfree(job->hw_fence);
- if (job->hw_vm_fence->base.ops)
+ if (dma_fence_was_initialized(&job->hw_vm_fence->base))
dma_fence_put(&job->hw_vm_fence->base);
else
kfree(job->hw_vm_fence);
idr_remove(&qdev->release_idr, release->id);
spin_unlock(&qdev->release_idr_lock);
- if (release->base.ops) {
+ if (dma_fence_was_initialized(&release->base)) {
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
* atomic ops (bit_*), so taking the spinlock will not be needed most
* of the time.
*
+ * DMA_FENCE_FLAG_INITIALIZED_BIT - fence was initialized
* DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
* DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
* DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_INITIALIZED_BIT,
DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
+/**
+ * dma_fence_was_initialized - test if fence was initialized
+ * @fence: fence to test
+ *
+ * Return: True if fence was ever initialized, false otherwise. Works correctly
+ * only when memory backing the fence structure is zero initialized on
+ * allocation.
+ */
+static inline bool dma_fence_was_initialized(struct dma_fence *fence)
+{
+ return fence && test_bit(DMA_FENCE_FLAG_INITIALIZED_BIT, &fence->flags);
+}
+
/**
* dma_fence_put - decreases refcount of the fence
* @fence: fence to reduce refcount of