git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/display: Use scoped-cleanup
author: Matt Roper <matthew.d.roper@intel.com>
Tue, 18 Nov 2025 16:43:52 +0000 (08:43 -0800)
committer: Matt Roper <matthew.d.roper@intel.com>
Wed, 19 Nov 2025 19:58:57 +0000 (11:58 -0800)
Eliminate some goto-based cleanup by utilizing scoped cleanup helpers.

v2:
 - Eliminate unnecessary 'ret' variable in intel_hdcp_gsc_check_status()
   (Gustavo)

Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://patch.msgid.link/20251118164338.3572146-42-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
drivers/gpu/drm/xe/display/xe_fb_pin.c
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c

index 1fd4a815e784be55b9f0e40b7548ba85037e858e..6a935a75f2a40aaf2d25c3980c64b953a876c81e 100644 (file)
@@ -210,10 +210,11 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
        /* TODO: Consider sharing framebuffer mapping?
         * embed i915_vma inside intel_framebuffer
         */
-       xe_pm_runtime_get_noresume(xe);
-       ret = mutex_lock_interruptible(&ggtt->lock);
+       guard(xe_pm_runtime_noresume)(xe);
+       ACQUIRE(mutex_intr, lock)(&ggtt->lock);
+       ret = ACQUIRE_ERR(mutex_intr, &lock);
        if (ret)
-               goto out;
+               return ret;
 
        align = XE_PAGE_SIZE;
        if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
@@ -223,15 +224,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
                vma->node = bo->ggtt_node[tile0->id];
        } else if (view->type == I915_GTT_VIEW_NORMAL) {
                vma->node = xe_ggtt_node_init(ggtt);
-               if (IS_ERR(vma->node)) {
-                       ret = PTR_ERR(vma->node);
-                       goto out_unlock;
-               }
+               if (IS_ERR(vma->node))
+                       return PTR_ERR(vma->node);
 
                ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
                if (ret) {
                        xe_ggtt_node_fini(vma->node);
-                       goto out_unlock;
+                       return ret;
                }
 
                xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
@@ -245,13 +244,13 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
                vma->node = xe_ggtt_node_init(ggtt);
                if (IS_ERR(vma->node)) {
                        ret = PTR_ERR(vma->node);
-                       goto out_unlock;
+                       return ret;
                }
 
                ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
                if (ret) {
                        xe_ggtt_node_fini(vma->node);
-                       goto out_unlock;
+                       return ret;
                }
 
                ggtt_ofs = vma->node->base.start;
@@ -265,10 +264,6 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
                                           rot_info->plane[i].dst_stride);
        }
 
-out_unlock:
-       mutex_unlock(&ggtt->lock);
-out:
-       xe_pm_runtime_put(xe);
        return ret;
 }
 
index 4ae847b628e23025231f6d89c08d614131216048..71d21fde1736723f01dc50ec2666664e867cf70a 100644 (file)
@@ -36,8 +36,6 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
        struct xe_tile *tile = xe_device_get_root_tile(xe);
        struct xe_gt *gt = tile->media_gt;
        struct xe_gsc *gsc = &gt->uc.gsc;
-       bool ret = true;
-       unsigned int fw_ref;
 
        if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
                drm_dbg_kms(&xe->drm,
@@ -45,22 +43,15 @@ bool intel_hdcp_gsc_check_status(struct drm_device *drm)
                return false;
        }
 
-       xe_pm_runtime_get(xe);
-       fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
-       if (!fw_ref) {
+       guard(xe_pm_runtime)(xe);
+       CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GSC);
+       if (!fw_ref.domains) {
                drm_dbg_kms(&xe->drm,
                            "failed to get forcewake to check proxy status\n");
-               ret = false;
-               goto out;
+               return false;
        }
 
-       if (!xe_gsc_proxy_init_done(gsc))
-               ret = false;
-
-       xe_force_wake_put(gt_to_fw(gt), fw_ref);
-out:
-       xe_pm_runtime_put(xe);
-       return ret;
+       return xe_gsc_proxy_init_done(gsc);
 }
 
 /*This function helps allocate memory for the command that we will send to gsc cs */
@@ -166,17 +157,15 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
        u32 addr_out_off, addr_in_wr_off = 0;
        int ret, tries = 0;
 
-       if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
-               ret = -ENOSPC;
-               goto out;
-       }
+       if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+               return -ENOSPC;
 
        msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
        msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
        addr_out_off = PAGE_SIZE;
 
        host_session_id = xe_gsc_create_host_session_id();
-       xe_pm_runtime_get_noresume(xe);
+       guard(xe_pm_runtime_noresume)(xe);
        addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
                                            addr_in_wr_off, HECI_MEADDRESS_HDCP,
                                            host_session_id, msg_in_len);
@@ -201,13 +190,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
        } while (++tries < 20);
 
        if (ret)
-               goto out;
+               return ret;
 
        xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
                           addr_out_off + HDCP_GSC_HEADER_SIZE,
                           msg_out_len);
 
-out:
-       xe_pm_runtime_put(xe);
        return ret;
 }