]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/amdgpu: reject gang submit on reserved VMIDs
authorChristian König <christian.koenig@amd.com>
Fri, 19 Jan 2024 13:57:29 +0000 (14:57 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 Sep 2024 09:11:37 +0000 (11:11 +0200)
[ Upstream commit 320debca1ba3a81c87247eac84eff976ead09ee0 ]

A gang submit won't work if the VMID is reserved and we can't flush out
VM changes from multiple engines at the same time.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h

index 61668a784315fcfc03837c726cc6658edeca9135..e361dc37a0890b3cce6b00613f84aafe6d09ec13 100644 (file)
@@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        unsigned int i;
        int r;
 
+       /*
+        * We can't use gang submit with reserved VMIDs when the VM changes
+        * can't be invalidated by more than one engine at the same time.
+        */
+       if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+               for (i = 0; i < p->gang_size; ++i) {
+                       struct drm_sched_entity *entity = p->entities[i];
+                       struct drm_gpu_scheduler *sched = entity->rq->sched;
+                       struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+                       if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+                               return -EINVAL;
+               }
+       }
+
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;
index ff1ea99292fbf0185274b08c5b1b2c33689e0a44..69dfc699d78b076eae17294c3b76689c7d399bbd 100644 (file)
@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        if (r || !idle)
                goto error;
 
-       if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+       if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
                r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
                if (r || !id)
                        goto error;
@@ -459,6 +459,19 @@ error:
        return r;
 }
 
+/**
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+       return vm->reserved_vmid[vmhub] ||
+               (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               unsigned vmhub)
 {
index fa8c42c83d5d26bc6d90455b5e7b51b627c5b5d7..240fa675126029a0050a44cd4d5065eefcf70bef 100644 (file)
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                                unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,