drm/xe/drm_client: Use scope-based cleanup
author    Matt Roper <matthew.d.roper@intel.com>
          Tue, 18 Nov 2025 16:43:54 +0000 (08:43 -0800)
committer Matt Roper <matthew.d.roper@intel.com>
          Wed, 19 Nov 2025 19:58:58 +0000 (11:58 -0800)
Use scope-based cleanup for forcewake and runtime PM.
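
The conversion builds on the kernel's scope-based cleanup helpers in
<linux/cleanup.h>. As a rough illustration only (not the driver's actual
definition, which lives in the xe headers), a runtime-PM guard like the
xe_pm_runtime one used in the diff below could be declared along these
lines:

  #include <linux/cleanup.h>
  #include "xe_pm.h"

  /*
   * Sketch only: pair xe_pm_runtime_get() with xe_pm_runtime_put() so
   * that scoped_guard(xe_pm_runtime, xe) { ... } drops the runtime PM
   * reference automatically when the scope is left, including on an
   * early return.
   */
  DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
               xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))

With such a guard in place, the early return in the hunk below still
releases both forcewake and the runtime PM reference, because the
compiler-generated cleanup runs on every path out of the scope.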

v2:
 - Use xe_force_wake_release_only rather than a custom one-off class for
   "any engine" forcewake (see the sketch below).  (Gustavo)

Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://patch.msgid.link/20251118164338.3572146-44-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
drivers/gpu/drm/xe/xe_drm_client.c

diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 78551832723bf4498e2102c6d983958ea3589926..2787bbb36141f3d63960213a95d2e74451959765 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -321,7 +321,6 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
        struct xe_hw_engine *hwe;
        struct xe_exec_queue *q;
        u64 gpu_timestamp;
-       struct xe_force_wake_ref fw_ref;
 
        /*
         * RING_TIMESTAMP registers are inaccessible in VF mode.
@@ -338,30 +337,26 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
        wait_var_event(&xef->exec_queue.pending_removal,
                       !atomic_read(&xef->exec_queue.pending_removal));
 
-       xe_pm_runtime_get(xe);
-       fw_ref = force_wake_get_any_engine(xe, &hwe);
-       if (!hwe) {
-               xe_pm_runtime_put(xe);
-               return;
-       }
-
-       /* Accumulate all the exec queues from this client */
-       mutex_lock(&xef->exec_queue.lock);
-       xa_for_each(&xef->exec_queue.xa, i, q) {
-               xe_exec_queue_get(q);
-               mutex_unlock(&xef->exec_queue.lock);
-
-               xe_exec_queue_update_run_ticks(q);
+       scoped_guard(xe_pm_runtime, xe) {
+               CLASS(xe_force_wake_release_only, fw_ref)(force_wake_get_any_engine(xe, &hwe));
+               if (!hwe)
+                       return;
 
+               /* Accumulate all the exec queues from this client */
                mutex_lock(&xef->exec_queue.lock);
-               xe_exec_queue_put(q);
-       }
-       mutex_unlock(&xef->exec_queue.lock);
+               xa_for_each(&xef->exec_queue.xa, i, q) {
+                       xe_exec_queue_get(q);
+                       mutex_unlock(&xef->exec_queue.lock);
 
-       gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+                       xe_exec_queue_update_run_ticks(q);
 
-       xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref.domains);
-       xe_pm_runtime_put(xe);
+                       mutex_lock(&xef->exec_queue.lock);
+                       xe_exec_queue_put(q);
+               }
+               mutex_unlock(&xef->exec_queue.lock);
+
+               gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+       }
 
        for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
                const char *class_name;