git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/amdgpu/vcn: Allow limiting ctx to instance 0 for AV1 at any time
author: David Rosca <david.rosca@amd.com>
Mon, 18 Aug 2025 07:18:37 +0000 (09:18 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 9 Sep 2025 20:42:26 +0000 (16:42 -0400)
There is no reason to require this to happen on first submitted IB only.
We need to wait for the queue to be idle, but it can be done at any
time (including when there are multiple video sessions active).

Signed-off-by: David Rosca <david.rosca@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 8908fdce0634a623404e9923ed2f536101a39db5)
Cc: stable@vger.kernel.org
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c

index 4b8f4407047fc081ff359aad1447d53d58c609a1..2811226b0ea5dca92d8d4b55ac5f7956e76e72f7 100644 (file)
@@ -1888,15 +1888,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
                                struct amdgpu_job *job)
 {
        struct drm_gpu_scheduler **scheds;
-
-       /* The create msg must be in the first IB submitted */
-       if (atomic_read(&job->base.entity->fence_seq))
-               return -EINVAL;
+       struct dma_fence *fence;
 
        /* if VCN0 is harvested, we can't support AV1 */
        if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
                return -EINVAL;
 
+       /* wait for all jobs to finish before switching to instance 0 */
+       fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+       if (fence) {
+               dma_fence_wait(fence, false);
+               dma_fence_put(fence);
+       }
+
        scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
                [AMDGPU_RING_PRIO_DEFAULT].sched;
        drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
index 311fb595bd693066833f1a6499a53675f3587bb9..706f3b2f484f7c0a36ac76fd5934eeb2556012c2 100644 (file)
@@ -1808,15 +1808,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
                                struct amdgpu_job *job)
 {
        struct drm_gpu_scheduler **scheds;
-
-       /* The create msg must be in the first IB submitted */
-       if (atomic_read(&job->base.entity->fence_seq))
-               return -EINVAL;
+       struct dma_fence *fence;
 
        /* if VCN0 is harvested, we can't support AV1 */
        if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
                return -EINVAL;
 
+       /* wait for all jobs to finish before switching to instance 0 */
+       fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+       if (fence) {
+               dma_fence_wait(fence, false);
+               dma_fence_put(fence);
+       }
+
        scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
                [AMDGPU_RING_PRIO_0].sched;
        drm_sched_entity_modify_sched(job->base.entity, scheds, 1);