drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
- xe_pm_runtime_get(xe);
+ guard(xe_pm_runtime)(xe);
ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
- xe_pm_runtime_put(xe);
return ret ?: count;
}
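Note: the guard(xe_pm_runtime)(xe) conversions throughout this excerpt assume a scope-based cleanup guard class for the runtime-PM wakeref. Its actual definition is not shown here; a minimal sketch, assuming it is built on DEFINE_GUARD() from <linux/cleanup.h> and sits next to the xe_pm_runtime_get()/xe_pm_runtime_put() declarations (e.g. in xe_pm.h), would be:

#include <linux/cleanup.h>

/*
 * Take the runtime-PM wakeref when the guard is instantiated and drop it
 * automatically when the guard goes out of scope, so early returns no
 * longer need an out: label that calls xe_pm_runtime_put().
 */
DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
	     xe_pm_runtime_get(_T), xe_pm_runtime_put(_T));

With that in place, guard(xe_pm_runtime)(xe) holds the wakeref for the remainder of the enclosing scope, which is what allows the error paths below to return directly.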
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
- xe_pm_runtime_get(xe);
+ guard(xe_pm_runtime)(xe);
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
if (ret)
- goto out;
+ return ret;
if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
&ver_low, NULL);
if (ret)
- goto out;
+ return ret;
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
&ver_high, NULL);
if (ret)
- goto out;
+ return ret;
major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
}
-out:
- xe_pm_runtime_put(xe);
- return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+ return sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
- xe_pm_runtime_get(xe);
+ guard(xe_pm_runtime)(xe);
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
if (ret)
- goto out;
+ return ret;
if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
&ver_low, NULL);
if (ret)
- goto out;
+ return ret;
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
&ver_high, NULL);
if (ret)
- goto out;
+ return ret;
major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
}
-out:
- xe_pm_runtime_put(xe);
- return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+ return sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
struct xe_device *xe = pdev_to_xe_device(pdev);
u32 cap, val;
- xe_pm_runtime_get(xe);
+ guard(xe_pm_runtime)(xe);
val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
- xe_pm_runtime_put(xe);
cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
u32 val = 0;
int ret;
- xe_pm_runtime_get(xe);
+ guard(xe_pm_runtime)(xe);
ret = xe_pcode_read(xe_device_get_root_tile(xe),
PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
&val, NULL);
- xe_pm_runtime_put(xe);
return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
}
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
freq = xe_guc_pc_get_act_freq(pc);
- xe_pm_runtime_put(dev_to_xe(dev));
return sysfs_emit(buf, "%d\n", freq);
}
u32 freq;
ssize_t ret;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
ret = xe_guc_pc_get_cur_freq(pc, &freq);
- xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
freq = xe_guc_pc_get_rpe_freq(pc);
- xe_pm_runtime_put(dev_to_xe(dev));
return sysfs_emit(buf, "%d\n", freq);
}
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
freq = xe_guc_pc_get_rpa_freq(pc);
- xe_pm_runtime_put(dev_to_xe(dev));
return sysfs_emit(buf, "%d\n", freq);
}
u32 freq;
ssize_t ret;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
ret = xe_guc_pc_get_min_freq(pc, &freq);
- xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
if (ret)
return ret;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
ret = xe_guc_pc_set_min_freq(pc, freq);
- xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
u32 freq;
ssize_t ret;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
ret = xe_guc_pc_get_max_freq(pc, &freq);
- xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
if (ret)
return ret;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
ret = xe_guc_pc_set_max_freq(pc, freq);
- xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
struct xe_guc_pc *pc = dev_to_pc(dev);
int err;
- xe_pm_runtime_get(dev_to_xe(dev));
+ guard(xe_pm_runtime)(dev_to_xe(dev));
err = xe_guc_pc_set_power_profile(pc, buff);
- xe_pm_runtime_put(dev_to_xe(dev));
return err ?: count;
}
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_reg reg;
- u32 val, mask;
+ u32 mask;
if (xe_gt_is_media_type(gt))
reg = MTL_MEDIA_PERF_LIMIT_REASONS;
else
reg = GT0_PERF_LIMIT_REASONS;
mask = GT0_PERF_LIMIT_REASONS_MASK;
- xe_pm_runtime_get(xe);
- val = xe_mmio_read32(&gt->mmio, reg) & mask;
- xe_pm_runtime_put(xe);
-
- return val;
+ guard(xe_pm_runtime)(xe);
+ return xe_mmio_read32(&gt->mmio, reg) & mask;
}
static bool is_throttled_by(struct xe_gt *gt, u32 mask)
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->show) {
- xe_pm_runtime_get(xe);
- ret = kattr->show(kobj, kattr, buf);
- xe_pm_runtime_put(xe);
+ guard(xe_pm_runtime)(xe);
+ return kattr->show(kobj, kattr, buf);
}
- return ret;
+ return -EIO;
}
static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->store) {
- xe_pm_runtime_get(xe);
- ret = kattr->store(kobj, kattr, buf, count);
- xe_pm_runtime_put(xe);
+ guard(xe_pm_runtime)(xe);
+ return kattr->store(kobj, kattr, buf, count);
}
- return ret;
+ return -EIO;
}
static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {