drm/amdgpu: rename enforce isolation variables
author Alex Deucher <alexander.deucher@amd.com>
Fri, 21 Feb 2025 20:20:45 +0000 (15:20 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 21 Apr 2025 14:56:06 +0000 (10:56 -0400)
Since they will be used for both KFD and KGD user queues,
rename them from kfd to userq.  No intended functional
change.

Acked-by: Sunil Khatri <sunil.khatri@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e966aefc2b0f3e33c417b97afadc686bed0ad583..b96e0613ea7e19010f080e62a8ca6d616cf2fe7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4368,7 +4368,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                amdgpu_sync_create(&adev->isolation[i].active);
                amdgpu_sync_create(&adev->isolation[i].prev);
        }
-       mutex_init(&adev->gfx.kfd_sch_mutex);
+       mutex_init(&adev->gfx.userq_sch_mutex);
        mutex_init(&adev->gfx.workload_profile_mutex);
        mutex_init(&adev->vcn.workload_profile_mutex);
        mutex_init(&adev->userq_mutex);
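The hunk above covers only the init side of the renamed lock. For reference, a minimal sketch of the conventional init/teardown pairing for a driver-private mutex like userq_sch_mutex (illustrative only; the fini path is not part of this commit, and example_state is a made-up stand-in for the amdgpu structures):

#include <linux/mutex.h>

/* Sketch: typical lifetime of a driver-private mutex such as
 * userq_sch_mutex.  Only the mutex_init() side appears in this diff.
 */
struct example_state {
	struct mutex userq_sch_mutex;
};

static void example_init(struct example_state *st)
{
	mutex_init(&st->userq_sch_mutex);	/* as in amdgpu_device_init() above */
}

static void example_fini(struct example_state *st)
{
	/* A no-op unless CONFIG_DEBUG_MUTEXES; documents the lock's lifetime. */
	mutex_destroy(&st->userq_sch_mutex);
}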
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 2c933d436e5640aeb31283d8fcc0de6493bbf2c1..c58d32983c45e02e6e3def0267cbd5c7fdbc97da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1947,39 +1947,40 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
                                    bool enable)
 {
-       mutex_lock(&adev->gfx.kfd_sch_mutex);
+       mutex_lock(&adev->gfx.userq_sch_mutex);
 
        if (enable) {
                /* If the count is already 0, it means there's an imbalance bug somewhere.
                 * Note that the bug may be in a different caller than the one which triggers the
                 * WARN_ON_ONCE.
                 */
-               if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+               if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
                        dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
                        goto unlock;
                }
 
-               adev->gfx.kfd_sch_req_count[idx]--;
+               adev->gfx.userq_sch_req_count[idx]--;
 
-               if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
-                   adev->gfx.kfd_sch_inactive[idx]) {
+               if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+                   adev->gfx.userq_sch_inactive[idx]) {
                        schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
                                              msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
                }
        } else {
-               if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+               if (adev->gfx.userq_sch_req_count[idx] == 0) {
                        cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
-                       if (!adev->gfx.kfd_sch_inactive[idx]) {
-                               amdgpu_amdkfd_stop_sched(adev, idx);
-                               adev->gfx.kfd_sch_inactive[idx] = true;
+                       if (!adev->gfx.userq_sch_inactive[idx]) {
+                               if (adev->kfd.init_complete)
+                                       amdgpu_amdkfd_stop_sched(adev, idx);
+                               adev->gfx.userq_sch_inactive[idx] = true;
                        }
                }
 
-               adev->gfx.kfd_sch_req_count[idx]++;
+               adev->gfx.userq_sch_req_count[idx]++;
        }
 
 unlock:
-       mutex_unlock(&adev->gfx.kfd_sch_mutex);
+       mutex_unlock(&adev->gfx.userq_sch_mutex);
 }
 
 /**
@@ -2024,12 +2025,11 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
                 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
                                       msecs_to_jiffies(1));
        } else {
                /* Tell KFD to resume the runqueue */
-               if (adev->kfd.init_complete) {
-                       WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
-                       WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+               WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+               WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+               if (adev->kfd.init_complete)
                        amdgpu_amdkfd_start_sched(adev, idx);
-                       adev->gfx.kfd_sch_inactive[idx] = false;
-               }
+               adev->gfx.userq_sch_inactive[idx] = false;
        }
        mutex_unlock(&adev->enforce_isolation_mutex);
 }
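To make the control flow above easier to follow, here is a compact userspace model of the same state machine (my sketch, not driver code: stop_sched()/start_sched() stand in for amdgpu_amdkfd_stop_sched()/amdgpu_amdkfd_start_sched(), and the delayed-work resume is collapsed into a direct call):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_XCP 8	/* assumption: stand-in for the amdgpu definition */

static pthread_mutex_t sch_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t req_count[MAX_XCP];	/* models userq_sch_req_count */
static bool inactive[MAX_XCP];		/* models userq_sch_inactive */

static void stop_sched(uint32_t idx)  { printf("stop  sched on xcp %u\n", idx); }
static void start_sched(uint32_t idx) { printf("start sched on xcp %u\n", idx); }

/* Models amdgpu_gfx_kfd_sch_ctrl(): enable=false takes a "keep the
 * scheduler halted" reference, enable=true drops it.  The real driver
 * defers the resume through delayed work; here it is immediate.
 */
static void sch_ctrl(uint32_t idx, bool enable)
{
	pthread_mutex_lock(&sch_lock);
	if (enable) {
		if (req_count[idx] == 0) {	/* imbalance bug, as in the WARN_ON_ONCE */
			fprintf(stderr, "refcount underflow on xcp %u\n", idx);
			goto unlock;
		}
		if (--req_count[idx] == 0 && inactive[idx]) {
			/* kernel: schedule_delayed_work(); the handler then resumes */
			start_sched(idx);
			inactive[idx] = false;
		}
	} else {
		if (req_count[idx] == 0 && !inactive[idx]) {
			stop_sched(idx);	/* kernel also cancels any pending resume work */
			inactive[idx] = true;
		}
		req_count[idx]++;
	}
unlock:
	pthread_mutex_unlock(&sch_lock);
}

int main(void)
{
	sch_ctrl(0, false);	/* first disable halts the scheduler */
	sch_ctrl(0, false);	/* nested disable only bumps the count */
	sch_ctrl(0, true);	/* count 2 -> 1: still halted */
	sch_ctrl(0, true);	/* count 1 -> 0: resume */
	return 0;
}

Calling it with enable=false takes a "keep the user-queue scheduler halted" reference for partition idx; enable=true drops that reference, and the scheduler resumes once the count falls back to zero.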
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 91dd365cb1e6c0242809ada46b542918f1f756ef..ed54095e6ad696b48511885c35c2d16f52521023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -475,9 +475,9 @@ struct amdgpu_gfx {
        bool                            enable_cleaner_shader;
        struct amdgpu_isolation_work    enforce_isolation[MAX_XCP];
        /* Mutex for synchronizing KFD scheduler operations */
-       struct mutex                    kfd_sch_mutex;
-       u64                             kfd_sch_req_count[MAX_XCP];
-       bool                            kfd_sch_inactive[MAX_XCP];
+       struct mutex                    userq_sch_mutex;
+       u64                             userq_sch_req_count[MAX_XCP];
+       bool                            userq_sch_inactive[MAX_XCP];
        unsigned long                   enforce_isolation_jiffies[MAX_XCP];
        unsigned long                   enforce_isolation_time[MAX_XCP];
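For quick reference, a self-contained restatement of just the renamed fields with explanatory comments (the annotations are mine, not from the header, and MAX_XCP here is an assumed stand-in for the value defined in the amdgpu headers):

#include <linux/mutex.h>
#include <linux/types.h>

#define MAX_XCP 8	/* assumption: stand-in for the amdgpu definition */

/* Sketch, not the real struct amdgpu_gfx: the three renamed fields. */
struct userq_sch_state {
	/* userq_sch_mutex: serializes halt/resume control across callers. */
	struct mutex	sch_mutex;
	/* userq_sch_req_count: outstanding "keep halted" requests per partition. */
	u64		sch_req_count[MAX_XCP];
	/* userq_sch_inactive: true while the scheduler is stopped on a partition. */
	bool		sch_inactive[MAX_XCP];
};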