drm/xe/display: handle HPD polling in display runtime suspend/resume
author		Vinod Govindapillai <vinod.govindapillai@intel.com>
		Fri, 23 Aug 2024 11:21:48 +0000 (14:21 +0300)
committer	Vinod Govindapillai <vinod.govindapillai@intel.com>
		Fri, 23 Aug 2024 19:10:55 +0000 (22:10 +0300)
In XE, display runtime suspend / resume routines are called only
if d3cold is allowed. This makes the driver unable to detect any
HPDs once the device goes into a runtime suspended state on
platforms like LNL. Update the display runtime suspend / resume
routines to include HPD polling regardless of d3cold status.
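
For reference, a sketch of the resulting runtime suspend helper (taken
from the xe_display.c hunk below, with descriptive comments added; the
resume side mirrors it via intel_hpd_poll_disable()):

	void xe_display_pm_runtime_suspend(struct xe_device *xe)
	{
		if (!xe->info.probe_display)
			return;

		/* Full display suspend only when d3cold is allowed */
		if (xe->d3cold.allowed)
			xe_display_pm_suspend(xe, true);

		/*
		 * Enable HPD polling regardless of d3cold so hotplugs
		 * are still detected while runtime suspended.
		 */
		intel_hpd_poll_enable(xe);
	}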

While xe_display_pm_suspend/resume() performs steps during runtime
suspend/resume that shouldn't happen, like suspending MST, and is
missing other steps, like enabling DC9, this patchset is meant to
keep the current behavior wrt. these, leaving the corresponding
updates for a follow-up.
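
On the xe_pm.c side, the net effect (matching the hunks below) is that
the runtime hooks call the display helpers unconditionally, while BO
eviction/restore stays gated on d3cold, e.g. in the suspend path:

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}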

v2: have a separate function for display runtime s/r (Rodrigo)

v3: better streamlining of system s/r and runtime s/r calls (Imre)

v4: rebased

Reviewed-by: Arun R Murthy <arun.r.murthy@intel.com>
Signed-off-by: Vinod Govindapillai <vinod.govindapillai@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240823112148.327015-4-vinod.govindapillai@intel.com
drivers/gpu/drm/xe/display/xe_display.c
drivers/gpu/drm/xe/display/xe_display.h
drivers/gpu/drm/xe/xe_pm.c

diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 00dd38f6177b1c198c86c674f18bf258b29e78b3..710b1e2170c13a80721ad4095648c918b1bd40a6 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -308,6 +308,18 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
        }
 }
 
+/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+       if (!xe->info.probe_display)
+               return;
+
+       if (xe->d3cold.allowed)
+               xe_display_pm_suspend(xe, true);
+
+       intel_hpd_poll_enable(xe);
+}
+
 void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
 {
        struct intel_display *display = &xe->display;
@@ -354,6 +366,17 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
        intel_display_power_suspend_late(xe);
 }
 
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+       if (!xe->info.probe_display)
+               return;
+
+       intel_hpd_poll_disable(xe);
+
+       if (xe->d3cold.allowed)
+               xe_display_pm_resume(xe, true);
+}
+
 void xe_display_pm_resume_early(struct xe_device *xe)
 {
        if (!xe->info.probe_display)
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 000fb5799df54f8b886fb37f3e00b89450c4e4bc..53d727fd792b4bfd369340b1a93f6afe26af27ce 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -38,6 +38,8 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
 void xe_display_pm_suspend_late(struct xe_device *xe);
 void xe_display_pm_resume_early(struct xe_device *xe);
 void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_resume(struct xe_device *xe);
 
 #else
 
@@ -67,6 +69,8 @@ static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
 static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
 static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
 static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
 
 #endif /* CONFIG_DRM_XE_DISPLAY */
 #endif /* _XE_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index fcfb49af8c89130582149c5f4712be511af2b8b9..c247e1cb8aba12d570840adf0350fdabcd3b6ae0 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -366,9 +366,9 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
                xe_bo_runtime_pm_release_mmap_offset(bo);
        mutex_unlock(&xe->mem_access.vram_userfault.lock);
 
-       if (xe->d3cold.allowed) {
-               xe_display_pm_suspend(xe, true);
+       xe_display_pm_runtime_suspend(xe);
 
+       if (xe->d3cold.allowed) {
                err = xe_bo_evict_all(xe);
                if (err)
                        goto out;
@@ -431,12 +431,14 @@ int xe_pm_runtime_resume(struct xe_device *xe)
        for_each_gt(gt, xe, id)
                xe_gt_resume(gt);
 
+       xe_display_pm_runtime_resume(xe);
+
        if (xe->d3cold.allowed) {
-               xe_display_pm_resume(xe, true);
                err = xe_bo_restore_user(xe);
                if (err)
                        goto out;
        }
+
 out:
        lock_map_release(&xe_pm_runtime_lockdep_map);
        xe_pm_write_callback_task(xe, NULL);