// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */
6 #include <linux/string_helpers.h>
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_reset.h"
11 #include "intel_gsc_fw.h"
12 #include "intel_gsc_uc.h"
13 #include "intel_guc.h"
14 #include "intel_guc_ads.h"
15 #include "intel_guc_print.h"
16 #include "intel_guc_submission.h"
17 #include "gt/intel_rps.h"
21 #include "i915_hwmon.h"
23 static const struct intel_uc_ops uc_ops_off
;
24 static const struct intel_uc_ops uc_ops_on
;
26 static void uc_expand_default_options(struct intel_uc
*uc
)
28 struct drm_i915_private
*i915
= uc_to_gt(uc
)->i915
;
30 if (i915
->params
.enable_guc
!= -1)
33 /* Don't enable GuC/HuC on pre-Gen12 */
34 if (GRAPHICS_VER(i915
) < 12) {
35 i915
->params
.enable_guc
= 0;
39 /* Don't enable GuC/HuC on older Gen12 platforms */
40 if (IS_TIGERLAKE(i915
) || IS_ROCKETLAKE(i915
)) {
41 i915
->params
.enable_guc
= 0;
45 /* Intermediate platforms are HuC authentication only */
46 if (IS_ALDERLAKE_S(i915
) && !IS_RAPTORLAKE_S(i915
)) {
47 i915
->params
.enable_guc
= ENABLE_GUC_LOAD_HUC
;
51 /* Default: enable HuC authentication and GuC submission */
52 i915
->params
.enable_guc
= ENABLE_GUC_LOAD_HUC
| ENABLE_GUC_SUBMISSION
;
54 /* XEHPSDV and PVC do not use HuC */
55 if (IS_XEHPSDV(i915
) || IS_PONTEVECCHIO(i915
))
56 i915
->params
.enable_guc
&= ~ENABLE_GUC_LOAD_HUC
;
59 /* Reset GuC providing us with fresh state for both GuC and HuC.
61 static int __intel_uc_reset_hw(struct intel_uc
*uc
)
63 struct intel_gt
*gt
= uc_to_gt(uc
);
67 ret
= i915_inject_probe_error(gt
->i915
, -ENXIO
);
71 ret
= intel_reset_guc(gt
);
73 gt_err(gt
, "Failed to reset GuC, ret = %d\n", ret
);
77 guc_status
= intel_uncore_read(gt
->uncore
, GUC_STATUS
);
78 gt_WARN(gt
, !(guc_status
& GS_MIA_IN_RESET
),
79 "GuC status: 0x%x, MIA core expected to be in reset\n",
85 static void __confirm_options(struct intel_uc
*uc
)
87 struct intel_gt
*gt
= uc_to_gt(uc
);
88 struct drm_i915_private
*i915
= gt
->i915
;
90 gt_dbg(gt
, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
91 i915
->params
.enable_guc
,
92 str_yes_no(intel_uc_wants_guc(uc
)),
93 str_yes_no(intel_uc_wants_guc_submission(uc
)),
94 str_yes_no(intel_uc_wants_huc(uc
)),
95 str_yes_no(intel_uc_wants_guc_slpc(uc
)));
97 if (i915
->params
.enable_guc
== 0) {
98 GEM_BUG_ON(intel_uc_wants_guc(uc
));
99 GEM_BUG_ON(intel_uc_wants_guc_submission(uc
));
100 GEM_BUG_ON(intel_uc_wants_huc(uc
));
101 GEM_BUG_ON(intel_uc_wants_guc_slpc(uc
));
105 if (!intel_uc_supports_guc(uc
))
106 gt_info(gt
, "Incompatible option enable_guc=%d - %s\n",
107 i915
->params
.enable_guc
, "GuC is not supported!");
109 if (i915
->params
.enable_guc
& ENABLE_GUC_SUBMISSION
&&
110 !intel_uc_supports_guc_submission(uc
))
111 gt_info(gt
, "Incompatible option enable_guc=%d - %s\n",
112 i915
->params
.enable_guc
, "GuC submission is N/A");
114 if (i915
->params
.enable_guc
& ~ENABLE_GUC_MASK
)
115 gt_info(gt
, "Incompatible option enable_guc=%d - %s\n",
116 i915
->params
.enable_guc
, "undocumented flag");
119 void intel_uc_init_early(struct intel_uc
*uc
)
121 uc_expand_default_options(uc
);
123 intel_guc_init_early(&uc
->guc
);
124 intel_huc_init_early(&uc
->huc
);
125 intel_gsc_uc_init_early(&uc
->gsc
);
127 __confirm_options(uc
);
129 if (intel_uc_wants_guc(uc
))
130 uc
->ops
= &uc_ops_on
;
132 uc
->ops
= &uc_ops_off
;
135 void intel_uc_init_late(struct intel_uc
*uc
)
137 intel_guc_init_late(&uc
->guc
);
138 intel_gsc_uc_load_start(&uc
->gsc
);
/* Late-release hook; currently nothing to tear down here. */
void intel_uc_driver_late_release(struct intel_uc *uc)
{
}
146 * intel_uc_init_mmio - setup uC MMIO access
147 * @uc: the intel_uc structure
149 * Setup minimal state necessary for MMIO accesses later in the
150 * initialization sequence.
152 void intel_uc_init_mmio(struct intel_uc
*uc
)
154 intel_guc_init_send_regs(&uc
->guc
);
157 static void __uc_capture_load_err_log(struct intel_uc
*uc
)
159 struct intel_guc
*guc
= &uc
->guc
;
161 if (guc
->log
.vma
&& !uc
->load_err_log
)
162 uc
->load_err_log
= i915_gem_object_get(guc
->log
.vma
->obj
);
165 static void __uc_free_load_err_log(struct intel_uc
*uc
)
167 struct drm_i915_gem_object
*log
= fetch_and_zero(&uc
->load_err_log
);
170 i915_gem_object_put(log
);
/* Driver removal: tear down HW state, uC state and any captured error log. */
void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}
181 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
182 * register using the same bits used in the CT message payload. Since our
183 * communication channel with guc is turned off at this point, we can save the
184 * message and handle it after we turn it back on.
186 static void guc_clear_mmio_msg(struct intel_guc
*guc
)
188 intel_uncore_write(guc_to_gt(guc
)->uncore
, SOFT_SCRATCH(15), 0);
191 static void guc_get_mmio_msg(struct intel_guc
*guc
)
195 spin_lock_irq(&guc
->irq_lock
);
197 val
= intel_uncore_read(guc_to_gt(guc
)->uncore
, SOFT_SCRATCH(15));
198 guc
->mmio_msg
|= val
& guc
->msg_enabled_mask
;
201 * clear all events, including the ones we're not currently servicing,
202 * to make sure we don't try to process a stale message if we enable
203 * handling of more events later.
205 guc_clear_mmio_msg(guc
);
207 spin_unlock_irq(&guc
->irq_lock
);
210 static void guc_handle_mmio_msg(struct intel_guc
*guc
)
212 /* we need communication to be enabled to reply to GuC */
213 GEM_BUG_ON(!intel_guc_ct_enabled(&guc
->ct
));
215 spin_lock_irq(&guc
->irq_lock
);
217 intel_guc_to_host_process_recv_msg(guc
, &guc
->mmio_msg
, 1);
220 spin_unlock_irq(&guc
->irq_lock
);
223 static int guc_enable_communication(struct intel_guc
*guc
)
225 struct intel_gt
*gt
= guc_to_gt(guc
);
226 struct drm_i915_private
*i915
= gt
->i915
;
229 GEM_BUG_ON(intel_guc_ct_enabled(&guc
->ct
));
231 ret
= i915_inject_probe_error(i915
, -ENXIO
);
235 ret
= intel_guc_ct_enable(&guc
->ct
);
239 /* check for mmio messages received before/during the CT enable */
240 guc_get_mmio_msg(guc
);
241 guc_handle_mmio_msg(guc
);
243 intel_guc_enable_interrupts(guc
);
245 /* check for CT messages received before we enabled interrupts */
246 spin_lock_irq(gt
->irq_lock
);
247 intel_guc_ct_event_handler(&guc
->ct
);
248 spin_unlock_irq(gt
->irq_lock
);
250 guc_dbg(guc
, "communication enabled\n");
255 static void guc_disable_communication(struct intel_guc
*guc
)
258 * Events generated during or after CT disable are logged by guc in
259 * via mmio. Make sure the register is clear before disabling CT since
260 * all events we cared about have already been processed via CT.
262 guc_clear_mmio_msg(guc
);
264 intel_guc_disable_interrupts(guc
);
266 intel_guc_ct_disable(&guc
->ct
);
269 * Check for messages received during/after the CT disable. We do not
270 * expect any messages to have arrived via CT between the interrupt
271 * disable and the CT disable because GuC should've been idle until we
272 * triggered the CT disable protocol.
274 guc_get_mmio_msg(guc
);
276 guc_dbg(guc
, "communication disabled\n");
279 static void __uc_fetch_firmwares(struct intel_uc
*uc
)
281 struct intel_gt
*gt
= uc_to_gt(uc
);
284 GEM_BUG_ON(!intel_uc_wants_guc(uc
));
286 err
= intel_uc_fw_fetch(&uc
->guc
.fw
);
288 /* Make sure we transition out of transient "SELECTED" state */
289 if (intel_uc_wants_huc(uc
)) {
290 gt_dbg(gt
, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err
));
291 intel_uc_fw_change_status(&uc
->huc
.fw
,
292 INTEL_UC_FIRMWARE_ERROR
);
295 if (intel_uc_wants_gsc_uc(uc
)) {
296 gt_dbg(gt
, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err
));
297 intel_uc_fw_change_status(&uc
->gsc
.fw
,
298 INTEL_UC_FIRMWARE_ERROR
);
304 if (intel_uc_wants_huc(uc
))
305 intel_uc_fw_fetch(&uc
->huc
.fw
);
307 if (intel_uc_wants_gsc_uc(uc
))
308 intel_uc_fw_fetch(&uc
->gsc
.fw
);
311 static void __uc_cleanup_firmwares(struct intel_uc
*uc
)
313 intel_uc_fw_cleanup_fetch(&uc
->gsc
.fw
);
314 intel_uc_fw_cleanup_fetch(&uc
->huc
.fw
);
315 intel_uc_fw_cleanup_fetch(&uc
->guc
.fw
);
318 static int __uc_init(struct intel_uc
*uc
)
320 struct intel_guc
*guc
= &uc
->guc
;
321 struct intel_huc
*huc
= &uc
->huc
;
324 GEM_BUG_ON(!intel_uc_wants_guc(uc
));
326 if (!intel_uc_uses_guc(uc
))
329 if (i915_inject_probe_failure(uc_to_gt(uc
)->i915
))
332 ret
= intel_guc_init(guc
);
336 if (intel_uc_uses_huc(uc
))
339 if (intel_uc_uses_gsc_uc(uc
))
340 intel_gsc_uc_init(&uc
->gsc
);
345 static void __uc_fini(struct intel_uc
*uc
)
347 intel_gsc_uc_fini(&uc
->gsc
);
348 intel_huc_fini(&uc
->huc
);
349 intel_guc_fini(&uc
->guc
);
352 static int __uc_sanitize(struct intel_uc
*uc
)
354 struct intel_guc
*guc
= &uc
->guc
;
355 struct intel_huc
*huc
= &uc
->huc
;
357 GEM_BUG_ON(!intel_uc_supports_guc(uc
));
359 intel_huc_sanitize(huc
);
360 intel_guc_sanitize(guc
);
362 return __intel_uc_reset_hw(uc
);
365 /* Initialize and verify the uC regs related to uC positioning in WOPCM */
366 static int uc_init_wopcm(struct intel_uc
*uc
)
368 struct intel_gt
*gt
= uc_to_gt(uc
);
369 struct intel_uncore
*uncore
= gt
->uncore
;
370 u32 base
= intel_wopcm_guc_base(>
->wopcm
);
371 u32 size
= intel_wopcm_guc_size(>
->wopcm
);
372 u32 huc_agent
= intel_uc_uses_huc(uc
) ? HUC_LOADING_AGENT_GUC
: 0;
376 if (unlikely(!base
|| !size
)) {
377 gt_probe_error(gt
, "Unsuccessful WOPCM partitioning\n");
381 GEM_BUG_ON(!intel_uc_supports_guc(uc
));
382 GEM_BUG_ON(!(base
& GUC_WOPCM_OFFSET_MASK
));
383 GEM_BUG_ON(base
& ~GUC_WOPCM_OFFSET_MASK
);
384 GEM_BUG_ON(!(size
& GUC_WOPCM_SIZE_MASK
));
385 GEM_BUG_ON(size
& ~GUC_WOPCM_SIZE_MASK
);
387 err
= i915_inject_probe_error(gt
->i915
, -ENXIO
);
391 mask
= GUC_WOPCM_SIZE_MASK
| GUC_WOPCM_SIZE_LOCKED
;
392 err
= intel_uncore_write_and_verify(uncore
, GUC_WOPCM_SIZE
, size
, mask
,
393 size
| GUC_WOPCM_SIZE_LOCKED
);
397 mask
= GUC_WOPCM_OFFSET_MASK
| GUC_WOPCM_OFFSET_VALID
| huc_agent
;
398 err
= intel_uncore_write_and_verify(uncore
, DMA_GUC_WOPCM_OFFSET
,
399 base
| huc_agent
, mask
,
401 GUC_WOPCM_OFFSET_VALID
);
408 gt_probe_error(gt
, "Failed to init uC WOPCM registers!\n");
409 gt_probe_error(gt
, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
410 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET
),
411 intel_uncore_read(uncore
, DMA_GUC_WOPCM_OFFSET
));
412 gt_probe_error(gt
, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
413 i915_mmio_reg_offset(GUC_WOPCM_SIZE
),
414 intel_uncore_read(uncore
, GUC_WOPCM_SIZE
));
419 static bool uc_is_wopcm_locked(struct intel_uc
*uc
)
421 struct intel_gt
*gt
= uc_to_gt(uc
);
422 struct intel_uncore
*uncore
= gt
->uncore
;
424 return (intel_uncore_read(uncore
, GUC_WOPCM_SIZE
) & GUC_WOPCM_SIZE_LOCKED
) ||
425 (intel_uncore_read(uncore
, DMA_GUC_WOPCM_OFFSET
) & GUC_WOPCM_OFFSET_VALID
);
428 static int __uc_check_hw(struct intel_uc
*uc
)
430 if (uc
->fw_table_invalid
)
433 if (!intel_uc_supports_guc(uc
))
437 * We can silently continue without GuC only if it was never enabled
438 * before on this system after reboot, otherwise we risk GPU hangs.
439 * To check if GuC was loaded before we look at WOPCM registers.
441 if (uc_is_wopcm_locked(uc
))
447 static void print_fw_ver(struct intel_gt
*gt
, struct intel_uc_fw
*fw
)
449 gt_info(gt
, "%s firmware %s version %u.%u.%u\n",
450 intel_uc_fw_type_repr(fw
->type
), fw
->file_selected
.path
,
451 fw
->file_selected
.ver
.major
,
452 fw
->file_selected
.ver
.minor
,
453 fw
->file_selected
.ver
.patch
);
456 static int __uc_init_hw(struct intel_uc
*uc
)
458 struct intel_gt
*gt
= uc_to_gt(uc
);
459 struct drm_i915_private
*i915
= gt
->i915
;
460 struct intel_guc
*guc
= &uc
->guc
;
461 struct intel_huc
*huc
= &uc
->huc
;
465 GEM_BUG_ON(!intel_uc_supports_guc(uc
));
466 GEM_BUG_ON(!intel_uc_wants_guc(uc
));
468 print_fw_ver(gt
, &guc
->fw
);
470 if (intel_uc_uses_huc(uc
))
471 print_fw_ver(gt
, &huc
->fw
);
473 if (!intel_uc_fw_is_loadable(&guc
->fw
)) {
474 ret
= __uc_check_hw(uc
) ||
475 intel_uc_fw_is_overridden(&guc
->fw
) ||
476 intel_uc_wants_guc_submission(uc
) ?
477 intel_uc_fw_status_to_error(guc
->fw
.status
) : 0;
481 ret
= uc_init_wopcm(uc
);
485 intel_guc_reset_interrupts(guc
);
487 /* WaEnableuKernelHeaderValidFix:skl */
488 /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
489 if (GRAPHICS_VER(i915
) == 9)
494 /* Disable a potentially low PL1 power limit to allow freq to be raised */
495 i915_hwmon_power_max_disable(gt
->i915
, &pl1en
);
497 intel_rps_raise_unslice(&uc_to_gt(uc
)->rps
);
501 * Always reset the GuC just before (re)loading, so
502 * that the state and timing are fairly predictable
504 ret
= __uc_sanitize(uc
);
508 intel_huc_fw_upload(huc
);
509 intel_guc_ads_reset(guc
);
510 intel_guc_write_params(guc
);
511 ret
= intel_guc_fw_upload(guc
);
515 gt_dbg(gt
, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
516 ERR_PTR(ret
), attempts
);
519 /* Did we succeded or run out of retries? */
521 goto err_log_capture
;
523 ret
= guc_enable_communication(guc
);
525 goto err_log_capture
;
528 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
529 * trigger the auth here. However, given that the HuC loaded this way
530 * survive GT reset, we still need to update our SW bookkeeping to make
531 * sure it reflects the correct HW status.
533 if (intel_huc_is_loaded_by_gsc(huc
))
534 intel_huc_update_auth_status(huc
);
536 intel_huc_auth(huc
, INTEL_HUC_AUTH_BY_GUC
);
538 if (intel_uc_uses_guc_submission(uc
)) {
539 ret
= intel_guc_submission_enable(guc
);
541 goto err_log_capture
;
544 if (intel_uc_uses_guc_slpc(uc
)) {
545 ret
= intel_guc_slpc_enable(&guc
->slpc
);
549 /* Restore GT back to RPn for non-SLPC path */
550 intel_rps_lower_unslice(&uc_to_gt(uc
)->rps
);
553 i915_hwmon_power_max_restore(gt
->i915
, pl1en
);
555 guc_info(guc
, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc
)));
556 guc_info(guc
, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc
)));
561 * We've failed to load the firmware :(
564 intel_guc_submission_disable(guc
);
566 __uc_capture_load_err_log(uc
);
568 /* Return GT back to RPn */
569 intel_rps_lower_unslice(&uc_to_gt(uc
)->rps
);
571 i915_hwmon_power_max_restore(gt
->i915
, pl1en
);
576 gt_notice(gt
, "GuC is uninitialized\n");
577 /* We want to run without GuC submission */
581 gt_probe_error(gt
, "GuC initialization failed %pe\n", ERR_PTR(ret
));
583 /* We want to keep KMS alive */
587 static void __uc_fini_hw(struct intel_uc
*uc
)
589 struct intel_guc
*guc
= &uc
->guc
;
591 if (!intel_guc_is_fw_running(guc
))
594 if (intel_uc_uses_guc_submission(uc
))
595 intel_guc_submission_disable(guc
);
601 * intel_uc_reset_prepare - Prepare for reset
602 * @uc: the intel_uc structure
604 * Preparing for full gpu reset.
606 void intel_uc_reset_prepare(struct intel_uc
*uc
)
608 struct intel_guc
*guc
= &uc
->guc
;
610 uc
->reset_in_progress
= true;
612 /* Nothing to do if GuC isn't supported */
613 if (!intel_uc_supports_guc(uc
))
616 /* Firmware expected to be running when this function is called */
617 if (!intel_guc_is_ready(guc
))
620 if (intel_uc_uses_guc_submission(uc
))
621 intel_guc_submission_reset_prepare(guc
);
627 void intel_uc_reset(struct intel_uc
*uc
, intel_engine_mask_t stalled
)
629 struct intel_guc
*guc
= &uc
->guc
;
631 /* Firmware can not be running when this function is called */
632 if (intel_uc_uses_guc_submission(uc
))
633 intel_guc_submission_reset(guc
, stalled
);
636 void intel_uc_reset_finish(struct intel_uc
*uc
)
638 struct intel_guc
*guc
= &uc
->guc
;
640 uc
->reset_in_progress
= false;
642 /* Firmware expected to be running when this function is called */
643 if (intel_uc_uses_guc_submission(uc
))
644 intel_guc_submission_reset_finish(guc
);
647 void intel_uc_cancel_requests(struct intel_uc
*uc
)
649 struct intel_guc
*guc
= &uc
->guc
;
651 /* Firmware can not be running when this function is called */
652 if (intel_uc_uses_guc_submission(uc
))
653 intel_guc_submission_cancel_requests(guc
);
656 void intel_uc_runtime_suspend(struct intel_uc
*uc
)
658 struct intel_guc
*guc
= &uc
->guc
;
660 if (!intel_guc_is_ready(guc
)) {
661 guc
->interrupts
.enabled
= false;
666 * Wait for any outstanding CTB before tearing down communication /w the
669 #define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5)
670 intel_guc_wait_for_pending_msg(guc
, &guc
->outstanding_submission_g2h
,
671 false, OUTSTANDING_CTB_TIMEOUT_PERIOD
);
672 GEM_WARN_ON(atomic_read(&guc
->outstanding_submission_g2h
));
674 guc_disable_communication(guc
);
677 void intel_uc_suspend(struct intel_uc
*uc
)
679 struct intel_guc
*guc
= &uc
->guc
;
680 intel_wakeref_t wakeref
;
683 /* flush the GSC worker */
684 intel_gsc_uc_flush_work(&uc
->gsc
);
686 wake_up_all_tlb_invalidate(guc
);
688 if (!intel_guc_is_ready(guc
)) {
689 guc
->interrupts
.enabled
= false;
693 intel_guc_submission_flush_work(guc
);
695 with_intel_runtime_pm(&uc_to_gt(uc
)->i915
->runtime_pm
, wakeref
) {
696 err
= intel_guc_suspend(guc
);
698 guc_dbg(guc
, "Failed to suspend, %pe", ERR_PTR(err
));
702 static void __uc_resume_mappings(struct intel_uc
*uc
)
704 intel_uc_fw_resume_mapping(&uc
->guc
.fw
);
705 intel_uc_fw_resume_mapping(&uc
->huc
.fw
);
708 static int __uc_resume(struct intel_uc
*uc
, bool enable_communication
)
710 struct intel_guc
*guc
= &uc
->guc
;
711 struct intel_gt
*gt
= guc_to_gt(guc
);
714 if (!intel_guc_is_fw_running(guc
))
717 /* Make sure we enable communication if and only if it's disabled */
718 GEM_BUG_ON(enable_communication
== intel_guc_ct_enabled(&guc
->ct
));
720 if (enable_communication
)
721 guc_enable_communication(guc
);
723 /* If we are only resuming GuC communication but not reloading
724 * GuC, we need to ensure the ARAT timer interrupt is enabled
725 * again. In case of GuC reload, it is enabled during SLPC enable.
727 if (enable_communication
&& intel_uc_uses_guc_slpc(uc
))
728 intel_guc_pm_intrmsk_enable(gt
);
730 err
= intel_guc_resume(guc
);
732 guc_dbg(guc
, "Failed to resume, %pe", ERR_PTR(err
));
736 intel_gsc_uc_resume(&uc
->gsc
);
738 if (intel_guc_tlb_invalidation_is_available(guc
)) {
739 intel_guc_invalidate_tlb_engines(guc
);
740 intel_guc_invalidate_tlb_guc(guc
);
746 int intel_uc_resume(struct intel_uc
*uc
)
749 * When coming out of S3/S4 we sanitize and re-init the HW, so
750 * communication is already re-enabled at this point.
752 return __uc_resume(uc
, false);
755 int intel_uc_runtime_resume(struct intel_uc
*uc
)
758 * During runtime resume we don't sanitize, so we need to re-init
759 * communication as well.
761 return __uc_resume(uc
, true);
764 static const struct intel_uc_ops uc_ops_off
= {
765 .init_hw
= __uc_check_hw
,
766 .fini
= __uc_fini
, /* to clean-up the init_early initialization */
769 static const struct intel_uc_ops uc_ops_on
= {
770 .sanitize
= __uc_sanitize
,
772 .init_fw
= __uc_fetch_firmwares
,
773 .fini_fw
= __uc_cleanup_firmwares
,
778 .init_hw
= __uc_init_hw
,
779 .fini_hw
= __uc_fini_hw
,
781 .resume_mappings
= __uc_resume_mappings
,