From fcee6854e6fb9ddff0597296728a4f7fd67bccc9 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 18 Nov 2025 08:44:03 -0800 Subject: [PATCH] drm/xe/sriov: Use scope-based runtime PM Use scope-based runtime power management in the SRIOV code for consistency with other parts of the driver. v2: - Drop unnecessary 'ret' variables. (Gustavo) Reviewed-by: Gustavo Sousa Link: https://patch.msgid.link/20251118164338.3572146-53-matthew.d.roper@intel.com Signed-off-by: Matt Roper --- drivers/gpu/drm/xe/xe_pci_sriov.c | 10 +++------- drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c | 6 ++---- drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c | 16 ++++------------ drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 5 +---- drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c | 3 +-- 5 files changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 9ff69c4843b0a..3fd22034f03e2 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -219,7 +219,6 @@ static int pf_disable_vfs(struct xe_device *xe) int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct xe_device *xe = pdev_to_xe_device(pdev); - int ret; if (!IS_SRIOV_PF(xe)) return -ENODEV; @@ -233,14 +232,11 @@ int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (num_vfs && pci_num_vf(pdev)) return -EBUSY; - xe_pm_runtime_get(xe); + guard(xe_pm_runtime)(xe); if (num_vfs > 0) - ret = pf_enable_vfs(xe, num_vfs); + return pf_enable_vfs(xe, num_vfs); else - ret = pf_disable_vfs(xe); - xe_pm_runtime_put(xe); - - return ret; + return pf_disable_vfs(xe); } /** diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c index bad751217e1e5..e84bdde9bc801 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -70,9 +70,8 @@ static ssize_t from_file_write_to_xe_call(struct file *file, const char __user * if (ret < 0) return ret; if (yes) { - 
xe_pm_runtime_get(xe); + guard(xe_pm_runtime)(xe); ret = call(xe); - xe_pm_runtime_put(xe); } if (ret < 0) return ret; @@ -209,9 +208,8 @@ static ssize_t from_file_write_to_vf_call(struct file *file, const char __user * if (ret < 0) return ret; if (yes) { - xe_pm_runtime_get(xe); + guard(xe_pm_runtime)(xe); ret = call(xe, vfid); - xe_pm_runtime_put(xe); } if (ret < 0) return ret; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c index c0b767ac735cf..3d140506ba36b 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c @@ -389,16 +389,12 @@ static ssize_t xe_sriov_dev_attr_store(struct kobject *kobj, struct attribute *a struct xe_sriov_dev_attr *vattr = to_xe_sriov_dev_attr(attr); struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); struct xe_device *xe = vkobj->xe; - ssize_t ret; if (!vattr->store) return -EPERM; - xe_pm_runtime_get(xe); - ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, buf, count); - xe_pm_runtime_put(xe); - - return ret; + guard(xe_pm_runtime)(xe); + return xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, buf, count); } static ssize_t xe_sriov_vf_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -423,18 +419,14 @@ static ssize_t xe_sriov_vf_attr_store(struct kobject *kobj, struct attribute *at struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); struct xe_device *xe = vkobj->xe; unsigned int vfid = vkobj->vfid; - ssize_t ret; xe_sriov_pf_assert_vfid(xe, vfid); if (!vattr->store) return -EPERM; - xe_pm_runtime_get(xe); - ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, vfid, buf, count); - xe_pm_runtime_put(xe); - - return ret; + guard(xe_pm_runtime)(xe); + return xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, vfid, buf, count); } static const struct sysfs_ops xe_sriov_dev_sysfs_ops = { diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c index 33f4238604e11..052a5071e69ff 100644 --- 
a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c @@ -477,8 +477,7 @@ void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p) if (!IS_VF_CCS_READY(xe)) return; - xe_pm_runtime_get(xe); - + guard(xe_pm_runtime)(xe); for_each_ccs_rw_ctx(ctx_id) { bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool; if (!bb_pool) @@ -489,6 +488,4 @@ void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p) drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool)); drm_puts(p, "\n"); } - - xe_pm_runtime_put(xe); } diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c index f3f478f14ff59..7f97db2f89bbc 100644 --- a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c @@ -141,12 +141,11 @@ static int NAME##_set(void *data, u64 val) \ if (val > (TYPE)~0ull) \ return -EOVERFLOW; \ \ - xe_pm_runtime_get(xe); \ + guard(xe_pm_runtime)(xe); \ err = xe_sriov_pf_wait_ready(xe) ?: \ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ if (!err) \ xe_sriov_pf_provision_set_custom_mode(xe); \ - xe_pm_runtime_put(xe); \ \ return err; \ } \ -- 2.47.3