If we haven't written the submit into the ringbuffer yet, then drop it.
The submit still retires through the normal path, to preserve fence
signalling order, but we can skip the IBs to the userspace cmdstream.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661489/
static void context_close(struct msm_context *ctx)
{
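+ /*
+  * Mark the context closed before tearing down its submitqueues, so any
+  * submit not yet written into the ring is no-op'd rather than executed.
+  */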
+ ctx->closed = true;
msm_submitqueue_close(ctx);
msm_context_put(ctx);
}
*/
int queueid;
+ /**
+ * @closed: The device file associated with this context has been closed.
+ *
+ * Once the device file is closed, any submits that have not yet been
+ * written to the ringbuffer are no-op'd.
+ */
+ bool closed;
+
/** @vm: the per-process GPU address-space */
struct drm_gpuvm *vm;
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned int nr_cmds = submit->nr_cmds;
int i;
msm_fence_init(submit->hw_fence, fctx);
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
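+ /*
+  * If the context was closed before this submit made it into the ring,
+  * no-op it: zeroing nr_cmds means no IBs are written, but the submit
+  * still flows through the ring and retires via the normal path, so
+  * fences signal in order.
+  */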
+ if (submit->queue->ctx->closed)
+ submit->nr_cmds = 0;
+
msm_gpu_submit(gpu, submit);
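+ /* Restore nr_cmds so the rest of the submit's lifetime sees the real count */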
+ submit->nr_cmds = nr_cmds;
+
mutex_unlock(&gpu->lock);
return dma_fence_get(submit->hw_fence);