drm/xe/device: Use scope-based cleanup
Author:     Matt Roper <matthew.d.roper@intel.com>
AuthorDate: Tue, 18 Nov 2025 16:43:50 +0000 (08:43 -0800)
Commit:     Matt Roper <matthew.d.roper@intel.com>
CommitDate: Wed, 19 Nov 2025 19:58:57 +0000 (11:58 -0800)
Convert device code to use scope-based forcewake and runtime PM.
Runtime PM references are now taken with guard(xe_pm_runtime) or, on
the ioctl paths, with ACQUIRE(xe_pm_runtime_ioctl)/ACQUIRE_ERR(), and
forcewake references with CLASS(xe_force_wake), all built on the
<linux/cleanup.h> helpers. The matching put now runs automatically
when the reference goes out of scope and can no longer be leaked on
an early return.
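
As an illustration of the pattern (a sketch only; the real class
definitions live in the xe headers and may differ), a guard like the
one now used in xe_file_close() can be built on the generic
<linux/cleanup.h> helpers:

  #include <linux/cleanup.h>

  /*
   * Illustrative definition: pair xe_pm_runtime_get() with
   * xe_pm_runtime_put() so the put runs when the guard leaves scope.
   */
  DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
               xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))

  static void example(struct xe_device *xe)
  {
          guard(xe_pm_runtime)(xe);

          /* ... every return path below drops the PM reference ... */
  }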

Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://patch.msgid.link/20251118164338.3572146-40-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
drivers/gpu/drm/xe/xe_device.c

index 9f2f19dc1fd34a29b39e5423235cb6984614256d..92f883dd88776fbc98d0bcd278b371dbe8239d23 100644
@@ -166,7 +166,7 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
        struct xe_exec_queue *q;
        unsigned long idx;
 
-       xe_pm_runtime_get(xe);
+       guard(xe_pm_runtime)(xe);
 
        /*
         * No need for exec_queue.lock here as there is no contention for it
@@ -184,8 +184,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
                xe_vm_close_and_put(vm);
 
        xe_file_put(xef);
-
-       xe_pm_runtime_put(xe);
 }
 
 static const struct drm_ioctl_desc xe_ioctls[] = {
@@ -220,10 +218,10 @@ static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        if (xe_device_wedged(xe))
                return -ECANCELED;
 
-       ret = xe_pm_runtime_get_ioctl(xe);
+       ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
+       ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
        if (ret >= 0)
                ret = drm_ioctl(file, cmd, arg);
-       xe_pm_runtime_put(xe);
 
        return ret;
 }
@@ -238,10 +236,10 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        if (xe_device_wedged(xe))
                return -ECANCELED;
 
-       ret = xe_pm_runtime_get_ioctl(xe);
+       ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
+       ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
        if (ret >= 0)
                ret = drm_compat_ioctl(file, cmd, arg);
-       xe_pm_runtime_put(xe);
 
        return ret;
 }
@@ -775,7 +773,6 @@ ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
 static int probe_has_flat_ccs(struct xe_device *xe)
 {
        struct xe_gt *gt;
-       unsigned int fw_ref;
        u32 reg;
 
        /* Always enabled/disabled, no runtime check to do */
@@ -786,8 +783,8 @@ static int probe_has_flat_ccs(struct xe_device *xe)
        if (!gt)
                return 0;
 
-       fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-       if (!fw_ref)
+       CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+       if (!fw_ref.domains)
                return -ETIMEDOUT;
 
        reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
@@ -797,8 +794,6 @@ static int probe_has_flat_ccs(struct xe_device *xe)
                drm_dbg(&xe->drm,
                        "Flat CCS has been disabled in bios, May lead to performance impact");
 
-       xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
        return 0;
 }
 
@@ -1034,7 +1029,6 @@ void xe_device_wmb(struct xe_device *xe)
  */
 static void tdf_request_sync(struct xe_device *xe)
 {
-       unsigned int fw_ref;
        struct xe_gt *gt;
        u8 id;
 
@@ -1042,8 +1036,8 @@ static void tdf_request_sync(struct xe_device *xe)
                if (xe_gt_is_media_type(gt))
                        continue;
 
-               fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-               if (!fw_ref)
+               CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+               if (!fw_ref.domains)
                        return;
 
                xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
@@ -1058,15 +1052,12 @@ static void tdf_request_sync(struct xe_device *xe)
                if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
                                   150, NULL, false))
                        xe_gt_err_once(gt, "TD flush timeout\n");
-
-               xe_force_wake_put(gt_to_fw(gt), fw_ref);
        }
 }
 
 void xe_device_l2_flush(struct xe_device *xe)
 {
        struct xe_gt *gt;
-       unsigned int fw_ref;
 
        gt = xe_root_mmio_gt(xe);
        if (!gt)
@@ -1075,8 +1066,8 @@ void xe_device_l2_flush(struct xe_device *xe)
        if (!XE_GT_WA(gt, 16023588340))
                return;
 
-       fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-       if (!fw_ref)
+       CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
+       if (!fw_ref.domains)
                return;
 
        spin_lock(&gt->global_invl_lock);
@@ -1086,8 +1077,6 @@ void xe_device_l2_flush(struct xe_device *xe)
                xe_gt_err_once(gt, "Global invalidation timeout\n");
 
        spin_unlock(&gt->global_invl_lock);
-
-       xe_force_wake_put(gt_to_fw(gt), fw_ref);
 }
 
 /**
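
For reference, a minimal sketch of what a scope-based forcewake class
along these lines could look like. The wrapper struct and its fields
are assumptions inferred from the fw_ref.domains checks above, not the
driver's actual definition:

  #include <linux/cleanup.h>

  /* Assumed wrapper: which forcewake instance was woken, and which
   * domains the get actually acquired (0 on timeout).
   */
  struct xe_fw_ref {
          struct xe_force_wake *fw;
          unsigned int domains;
  };

  DEFINE_CLASS(xe_force_wake, struct xe_fw_ref,
               /* exit: runs automatically at end of scope */
               xe_force_wake_put(_T.fw, _T.domains),
               /* init: take the reference, remember what we got */
               ((struct xe_fw_ref){
                       .fw = fw,
                       .domains = xe_force_wake_get(fw, domains),
               }),
               struct xe_force_wake *fw, enum xe_force_wake_domains domains)

With a class like this, CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT)
takes the reference, fw_ref.domains == 0 signals a wake timeout exactly as
the converted probe_has_flat_ccs() checks, and the put runs on every exit
path, including the -ETIMEDOUT return (assuming a put with zero domains is
a no-op).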