/** @reqs_acked: GPU request wait queue. */
wait_queue_head_t reqs_acked;
+
+ /** @cache_flush_lock: Lock to serialize cache flushes. */
+ struct mutex cache_flush_lock;
};
#define GPU_INTERRUPTS_MASK \
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
+ mutex_init(&gpu->cache_flush_lock);
ptdev->gpu = gpu;
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
bool timedout = false;
unsigned long flags;
+ /* Serialize cache flush operations; guard() drops the mutex on every return path. */
+ guard(mutex)(&ptdev->gpu->cache_flush_lock);
+
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {