return 0;
}
+/**
+ * xe_gt_runtime_suspend() - GT runtime suspend
+ * @gt: the GT object
+ *
+ * Holding all forcewake domains, runtime-suspend the GT's UCs and
+ * disable host L2 VRAM caching.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_gt_runtime_suspend(struct xe_gt *gt)
+{
+ unsigned int fw_ref;
+ int err = -ETIMEDOUT;
+
+ xe_gt_dbg(gt, "runtime suspending\n");
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ goto err_force_wake;
+
+ xe_uc_runtime_suspend(&gt->uc);
+ xe_gt_disable_host_l2_vram(gt);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_gt_dbg(gt, "runtime suspended\n");
+
+ return 0;
+
+err_force_wake:
+ /* Drop whatever partial forcewake reference we did obtain. */
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return err;
+}
+
+/**
+ * xe_gt_runtime_resume() - GT runtime resume
+ * @gt: the GT object
+ *
+ * Holding all forcewake domains, re-enable host L2 VRAM caching and
+ * runtime-resume the GT's UCs (reverse order of xe_gt_runtime_suspend()).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_gt_runtime_resume(struct xe_gt *gt)
+{
+ unsigned int fw_ref;
+ int err = -ETIMEDOUT;
+
+ xe_gt_dbg(gt, "runtime resuming\n");
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ goto err_force_wake;
+
+ xe_gt_enable_host_l2_vram(gt);
+ xe_uc_runtime_resume(&gt->uc);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_gt_dbg(gt, "runtime resumed\n");
+
+ return 0;
+
+err_force_wake:
+ /* Drop whatever partial forcewake reference we did obtain. */
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return err;
+}
+
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
enum xe_engine_class class,
u16 instance, bool logical)
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
+int xe_gt_runtime_resume(struct xe_gt *gt);
+int xe_gt_runtime_suspend(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
return xe_guc_submit_start(guc);
}
+/**
+ * xe_guc_runtime_suspend() - GuC runtime suspend
+ * @guc: The GuC object
+ *
+ * Stop further runs of submission tasks on given GuC and runtime suspend
+ * GuC CT.
+ */
+void xe_guc_runtime_suspend(struct xe_guc *guc)
+{
+ /*
+ * Quiesce submission before taking the CT down; this is the exact
+ * mirror of the ordering in xe_guc_runtime_resume().
+ */
+ xe_guc_submit_pause(guc);
+ xe_guc_submit_disable(guc);
+ xe_guc_ct_runtime_suspend(&guc->ct);
+}
+
+/**
+ * xe_guc_runtime_resume() - GuC runtime resume
+ * @guc: The GuC object
+ *
+ * Runtime resume GuC CT and allow further runs of submission tasks on
+ * given GuC.
+ */
+void xe_guc_runtime_resume(struct xe_guc *guc)
+{
+ /*
+ * Runtime PM flows are not applicable for VFs, so it's safe to
+ * directly enable IRQ.
+ */
+ guc_enable_irq(guc);
+
+ /* Bring the CT back up before re-enabling and unpausing submission. */
+ xe_guc_ct_runtime_resume(&guc->ct);
+ xe_guc_submit_enable(guc);
+ xe_guc_submit_unpause(guc);
+}
+
void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
struct xe_gt *gt = guc_to_gt(guc);
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
int xe_guc_enable_communication(struct xe_guc *guc);
int xe_guc_opt_in_features_enable(struct xe_guc *guc);
+void xe_guc_runtime_suspend(struct xe_guc *guc);
+void xe_guc_runtime_resume(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
stop_g2h_handler(ct);
}
+/**
+ * xe_guc_ct_runtime_suspend() - GuC CT runtime suspend
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to disabled state.
+ */
+void xe_guc_ct_runtime_suspend(struct xe_guc_ct *ct)
+{
+ /*
+ * Since we're already in runtime suspend path, we shouldn't have pending
+ * messages. But if there happen to be any, we'd probably want them to be
+ * thrown as errors for further investigation.
+ */
+ xe_guc_ct_disable(ct);
+}
+
+/**
+ * xe_guc_ct_runtime_resume() - GuC CT runtime resume
+ * @ct: the &xe_guc_ct
+ *
+ * Restart GuC CT and set it to enabled state.
+ */
+void xe_guc_ct_runtime_resume(struct xe_guc_ct *ct)
+{
+ /* Counterpart to the xe_guc_ct_disable() done on runtime suspend. */
+ xe_guc_ct_restart(ct);
+}
+
static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
struct guc_ctb *h2g = &ct->ctbs.h2g;
int xe_guc_ct_enable(struct xe_guc_ct *ct);
int xe_guc_ct_restart(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
+void xe_guc_ct_runtime_resume(struct xe_guc_ct *ct);
+void xe_guc_ct_runtime_suspend(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
}
for_each_gt(gt, xe, id) {
- err = xe_gt_suspend(gt);
+ err = xe->d3cold.allowed ? xe_gt_suspend(gt) : xe_gt_runtime_suspend(gt);
if (err)
goto out_resume;
}
xe_rpm_lockmap_acquire(xe);
- for_each_gt(gt, xe, id)
- xe_gt_idle_disable_c6(gt);
-
if (xe->d3cold.allowed) {
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
err = xe_pcode_ready(xe, true);
if (err)
goto out;
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
- xe_gt_resume(gt);
+ xe->d3cold.allowed ? xe_gt_resume(gt) : xe_gt_runtime_resume(gt);
xe_display_pm_runtime_resume(xe);
return xe_guc_suspend(&uc->guc);
}
+/**
+ * xe_uc_runtime_suspend() - UC runtime suspend
+ * @uc: the UC object
+ *
+ * Runtime suspend all UCs.
+ */
+void xe_uc_runtime_suspend(struct xe_uc *uc)
+{
+ /* Nothing to do on devices running without UCs. */
+ if (xe_device_uc_enabled(uc_to_xe(uc)))
+ xe_guc_runtime_suspend(&uc->guc);
+}
+
+/**
+ * xe_uc_runtime_resume() - UC runtime resume
+ * @uc: the UC object
+ *
+ * Runtime resume all UCs.
+ */
+void xe_uc_runtime_resume(struct xe_uc *uc)
+{
+ /* Nothing to do on devices running without UCs. */
+ if (xe_device_uc_enabled(uc_to_xe(uc)))
+ xe_guc_runtime_resume(&uc->guc);
+}
+
/**
* xe_uc_declare_wedged() - Declare UC wedged
* @uc: the UC object
int xe_uc_load_hw(struct xe_uc *uc);
void xe_uc_gucrc_disable(struct xe_uc *uc);
int xe_uc_reset_prepare(struct xe_uc *uc);
+void xe_uc_runtime_resume(struct xe_uc *uc);
+void xe_uc_runtime_suspend(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);