git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/xe/oa: Separate batch submission from waiting for completion
authorAshutosh Dixit <ashutosh.dixit@intel.com>
Tue, 22 Oct 2024 20:03:46 +0000 (13:03 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2025 12:30:13 +0000 (04:30 -0800)
[ Upstream commit dddcb19ad4d4bbe943a72a1fb3266c6e8aa8d541 ]

When we introduce xe_syncs, we don't wait for internal OA programming
batches to complete. That is, xe_syncs are signaled asynchronously. In
anticipation of this, separate out batch submission from waiting for
completion of those batches.

v2: Change return type of xe_oa_submit_bb to "struct dma_fence *" (Matt B)
v3: Retain init "int err = 0;" in xe_oa_submit_bb (Jose)

Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241022200352.1192560-2-ashutosh.dixit@intel.com
Stable-dep-of: f0ed39830e60 ("xe/oa: Fix query mode of operation for OAR/OAC")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/xe/xe_oa.c

index 6fc00d63b2857ff489f651d027e41ae6f620d585..3328529774cb7103fa55383d742b2b1137d7db14 100644 (file)
@@ -567,11 +567,10 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
        return ret;
 }
 
-static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
+static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
 {
        struct xe_sched_job *job;
        struct dma_fence *fence;
-       long timeout;
        int err = 0;
 
        /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
@@ -585,14 +584,9 @@ static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
        fence = dma_fence_get(&job->drm.s_fence->finished);
        xe_sched_job_push(job);
 
-       timeout = dma_fence_wait_timeout(fence, false, HZ);
-       dma_fence_put(fence);
-       if (timeout < 0)
-               err = timeout;
-       else if (!timeout)
-               err = -ETIME;
+       return fence;
 exit:
-       return err;
+       return ERR_PTR(err);
 }
 
 static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
@@ -656,6 +650,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
 static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
                                  const struct flex *flex, u32 count)
 {
+       struct dma_fence *fence;
        struct xe_bb *bb;
        int err;
 
@@ -667,7 +662,16 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr
 
        xe_oa_store_flex(stream, lrc, bb, flex, count);
 
-       err = xe_oa_submit_bb(stream, bb);
+       fence = xe_oa_submit_bb(stream, bb);
+       if (IS_ERR(fence)) {
+               err = PTR_ERR(fence);
+               goto free_bb;
+       }
+       xe_bb_free(bb, fence);
+       dma_fence_put(fence);
+
+       return 0;
+free_bb:
        xe_bb_free(bb, NULL);
 exit:
        return err;
@@ -675,6 +679,7 @@ exit:
 
 static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
 {
+       struct dma_fence *fence;
        struct xe_bb *bb;
        int err;
 
@@ -686,7 +691,16 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re
 
        write_cs_mi_lri(bb, reg_lri, 1);
 
-       err = xe_oa_submit_bb(stream, bb);
+       fence = xe_oa_submit_bb(stream, bb);
+       if (IS_ERR(fence)) {
+               err = PTR_ERR(fence);
+               goto free_bb;
+       }
+       xe_bb_free(bb, fence);
+       dma_fence_put(fence);
+
+       return 0;
+free_bb:
        xe_bb_free(bb, NULL);
 exit:
        return err;
@@ -914,15 +928,32 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
 {
 #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
        struct xe_oa_config_bo *oa_bo;
-       int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
+       int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
+       struct dma_fence *fence;
+       long timeout;
 
+       /* Emit OA configuration batch */
        oa_bo = xe_oa_alloc_config_buffer(stream, config);
        if (IS_ERR(oa_bo)) {
                err = PTR_ERR(oa_bo);
                goto exit;
        }
 
-       err = xe_oa_submit_bb(stream, oa_bo->bb);
+       fence = xe_oa_submit_bb(stream, oa_bo->bb);
+       if (IS_ERR(fence)) {
+               err = PTR_ERR(fence);
+               goto exit;
+       }
+
+       /* Wait till all previous batches have executed */
+       timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
+       dma_fence_put(fence);
+       if (timeout < 0)
+               err = timeout;
+       else if (!timeout)
+               err = -ETIME;
+       if (err)
+               drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
 
        /* Additional empirical delay needed for NOA programming after registers are written */
        usleep_range(us, 2 * us);