drm/amdkfd: Move the process suspend and resume out of full access
author    Emily Deng <Emily.Deng@amd.com>
          Tue, 27 May 2025 03:42:11 +0000 (11:42 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Wed, 18 Jun 2025 16:19:19 +0000 (12:19 -0400)
Suspending and resuming KFD processes does not require exclusive GPU
access, so move these steps out of the SRIOV full-access window to
reduce the time spent holding exclusive access.

v3:
Move the process suspend before hardware fini.
Remove the duplicate call on bare metal.

v4:
Refine code

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
Acked-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
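
Two things help when reading the diff below. First, the old run_pm flag is
replaced by an explicit suspend_proc/resume_proc flag, and the process-level
work moves into a dedicated kgd2kfd_suspend_process()/kgd2kfd_resume_process()
pair. Second, the polarity flips at existing call sites: run_pm == false used
to mean "also lock KFD and suspend every process", which is exactly what
suspend_proc == true now requests, so callers such as the reset paths change
false to true with no change in behavior. The standalone sketch below models
that contract; the model_* names and stub bodies are illustrative, not code
from this patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs; the real work happens inside amdkfd. */
static void quiesce_kfd_nodes(void)     { puts("KFD node queues quiesced"); }
static void suspend_all_processes(void) { puts("all KFD processes suspended"); }

/* Models the new kgd2kfd_suspend_process(): process-level work only. */
static void model_suspend_process(void)
{
        suspend_all_processes();
}

/* Models the new kgd2kfd_suspend(): hardware quiesce, with the process
 * suspend reduced to an explicit, optional first step. */
static void model_suspend(bool suspend_proc)
{
        if (suspend_proc)
                model_suspend_process();
        quiesce_kfd_nodes();
}

int main(void)
{
        model_suspend(true);  /* old run_pm == false call sites land here */
        return 0;
}
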
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 652389d0d4e665f663d097ec675d52e2a2a42dbd..3d89367904b79269c9b67601808fcbef2ff1c5d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
 {
        if (adev->kfd.dev)
-               kgd2kfd_suspend(adev->kfd.dev, run_pm);
+               kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
 {
        int r = 0;
 
        if (adev->kfd.dev)
-               r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+               r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+       return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+       if (adev->kfd.dev)
+               kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd.dev)
+               r = kgd2kfd_resume_process(adev->kfd.dev);
 
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3d5812269ea0d28b985850679e22c50d0ede9816..33eb4826b58b1a84a410b112b38e777064ccf9af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,8 +413,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd,
                      struct amdgpu_reset_context *reset_context);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 }
 
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
 {
 }
 
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+       return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
 {
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea9ea2ee23ec595eedc08ea97666ab..1105a09e55dc18309c47d01f652f07e6dda399d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
        if (!down_read_trylock(&adev->reset_domain->sem))
                return;
 
-       amdgpu_amdkfd_suspend(adev, false);
+       amdgpu_amdkfd_suspend(adev, true);
 
        if (suspend_resume_compute_scheduler(adev, true))
                goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
 out:
        suspend_resume_compute_scheduler(adev, false);
 
-       amdgpu_amdkfd_resume(adev, false);
+       amdgpu_amdkfd_resume(adev, true);
 
        up_read(&adev->reset_domain->sem);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d9b1bd97491fcc053a952ee17592c3ae3122723a..e0a85c98f249292a3735cecd2a77e2a4ac0e846b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3524,7 +3524,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
-       amdgpu_amdkfd_suspend(adev, false);
+       amdgpu_amdkfd_suspend(adev, true);
        amdgpu_userq_suspend(adev);
 
        /* Workaround for ASICs need to disable SMC first */
@@ -5083,6 +5083,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
        adev->in_suspend = true;
 
        if (amdgpu_sriov_vf(adev)) {
+               if (!adev->in_s0ix && !adev->in_runpm)
+                       amdgpu_amdkfd_suspend_process(adev);
                amdgpu_virt_fini_data_exchange(adev);
                r = amdgpu_virt_request_full_gpu(adev, false);
                if (r)
@@ -5102,7 +5104,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
        amdgpu_device_ip_suspend_phase1(adev);
 
        if (!adev->in_s0ix) {
-               amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+               amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
                amdgpu_userq_suspend(adev);
        }
 
@@ -5200,7 +5202,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
        }
 
        if (!adev->in_s0ix) {
-               r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+               r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
                if (r)
                        goto exit;
 
@@ -5219,6 +5221,9 @@ exit:
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_data_exchange(adev);
                amdgpu_virt_release_full_gpu(adev, true);
+
+               if (!adev->in_s0ix && !r && !adev->in_runpm)
+                       r = amdgpu_amdkfd_resume_process(adev);
        }
 
        if (r)
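
The amdgpu_device.c hunks above are the heart of the patch: on an SRIOV VF,
the process suspend now runs before amdgpu_virt_request_full_gpu() opens the
exclusive-access window, and the process resume runs after
amdgpu_virt_release_full_gpu() closes it, while only the hardware quiesce
stays inside. A condensed, compilable model of the reordered flow (stub
functions with assumed names, not driver code):

#include <stdio.h>

/* Stubs standing in for the real amdgpu/virt calls. */
static void kfd_suspend_process(void) { puts("processes suspended (shared access)"); }
static void kfd_resume_process(void)  { puts("processes resumed (shared access)"); }
static void request_full_gpu(void)    { puts("-> exclusive access begins"); }
static void release_full_gpu(void)    { puts("<- exclusive access ends"); }
static void kfd_suspend_hw(void)      { puts("KFD hardware quiesced"); }
static void kfd_resume_hw(void)       { puts("KFD hardware restarted"); }

static void vf_suspend(void)
{
        kfd_suspend_process();  /* moved out: needs no exclusive access */
        request_full_gpu();
        kfd_suspend_hw();       /* suspend_proc == false on a VF */
        /* rest of device suspend runs under full access */
}

static void vf_resume(void)
{
        /* device resume runs under full access */
        kfd_resume_hw();        /* resume_proc == false on a VF */
        release_full_gpu();
        kfd_resume_process();   /* moved out: runs after full access ends */
}

int main(void)
{
        vf_suspend();
        vf_resume();
        return 0;
}
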
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 35b0a7fb37b96715d5acc522652040697b2bf0bd..e6af50c947a12388abf034f3e4b6e66fe2d1799d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -1679,9 +1679,9 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
        if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
                return -EOPNOTSUPP;
 
-       amdgpu_amdkfd_suspend(adev, false);
+       amdgpu_amdkfd_suspend(adev, true);
        r = amdgpu_sdma_reset_engine(adev, id);
-       amdgpu_amdkfd_resume(adev, false);
+       amdgpu_amdkfd_resume(adev, true);
 
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a12e1433943d11cc9489d07cf4a5235e670d4372..7e749f9b6d69daa7043e7ec081db56ba20e5eef5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -971,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
                kfd_smi_event_update_gpu_reset(node, false, reset_context);
        }
 
-       kgd2kfd_suspend(kfd, false);
+       kgd2kfd_suspend(kfd, true);
 
        for (i = 0; i < kfd->num_nodes; i++)
                kfd_signal_reset_event(kfd->nodes[i]);
@@ -1039,7 +1039,7 @@ bool kfd_is_locked(struct kfd_dev *kfd)
        return false;
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
 {
        struct kfd_node *node;
        int i;
@@ -1047,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
        if (!kfd->init_complete)
                return;
 
-       /* for runtime suspend, skip locking kfd */
-       if (!run_pm) {
-               mutex_lock(&kfd_processes_mutex);
-               /* For first KFD device suspend all the KFD processes */
-               if (++kfd_locked == 1)
-                       kfd_suspend_all_processes();
-               mutex_unlock(&kfd_processes_mutex);
-       }
+       if (suspend_proc)
+               kgd2kfd_suspend_process(kfd);
 
        for (i = 0; i < kfd->num_nodes; i++) {
                node = kfd->nodes[i];
@@ -1062,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
        }
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
 {
        int ret, i;
 
@@ -1075,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
                        return ret;
        }
 
-       /* for runtime resume, skip unlocking kfd */
-       if (!run_pm) {
-               mutex_lock(&kfd_processes_mutex);
-               if (--kfd_locked == 0)
-                       ret = kfd_resume_all_processes();
-               WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
-               mutex_unlock(&kfd_processes_mutex);
-       }
+       if (resume_proc)
+               ret = kgd2kfd_resume_process(kfd);
+
+       return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+       if (!kfd->init_complete)
+               return;
+
+       mutex_lock(&kfd_processes_mutex);
+       /* For first KFD device suspend all the KFD processes */
+       if (++kfd_locked == 1)
+               kfd_suspend_all_processes();
+       mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+       int ret = 0;
+
+       if (!kfd->init_complete)
+               return 0;
+
+       mutex_lock(&kfd_processes_mutex);
+       if (--kfd_locked == 0)
+               ret = kfd_resume_all_processes();
+       WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+       mutex_unlock(&kfd_processes_mutex);
 
        return ret;
 }
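
The split preserves the original kfd_locked accounting: the first device to
suspend stops every KFD process, only the last device to resume restarts them,
and a negative count trips a one-time warning (WARN_ONCE in the kernel). A
standalone model of that refcount scheme (a pthread mutex standing in for
kfd_processes_mutex; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int kfd_locked;  /* models the global suspend refcount */

static void suspend_all(void) { puts("first device: suspend all processes"); }
static int  resume_all(void)  { puts("last device: resume all processes"); return 0; }

static void model_suspend_process(void)
{
        pthread_mutex_lock(&lock);
        /* Only the first KFD device to suspend stops the processes. */
        if (++kfd_locked == 1)
                suspend_all();
        pthread_mutex_unlock(&lock);
}

static int model_resume_process(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        /* Only the last device to resume restarts them. */
        if (--kfd_locked == 0)
                ret = resume_all();
        if (kfd_locked < 0)  /* WARN_ONCE in the real code */
                fprintf(stderr, "KFD suspend / resume ref. error\n");
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        /* Two devices suspend, then resume: the work happens on the
         * first suspend and the last resume only. */
        model_suspend_process();
        model_suspend_process();
        model_resume_process();
        model_resume_process();
        return 0;
}
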