cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

intel_uc.c (17614B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2016-2019 Intel Corporation
      4 */
      5
      6#include <linux/string_helpers.h>
      7
      8#include "gt/intel_gt.h"
      9#include "gt/intel_reset.h"
     10#include "intel_guc.h"
     11#include "intel_guc_ads.h"
     12#include "intel_guc_submission.h"
     13#include "gt/intel_rps.h"
     14#include "intel_uc.h"
     15
     16#include "i915_drv.h"
     17
     18static const struct intel_uc_ops uc_ops_off;
     19static const struct intel_uc_ops uc_ops_on;
     20
     21static void uc_expand_default_options(struct intel_uc *uc)
     22{
     23	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
     24
     25	if (i915->params.enable_guc != -1)
     26		return;
     27
     28	/* Don't enable GuC/HuC on pre-Gen12 */
     29	if (GRAPHICS_VER(i915) < 12) {
     30		i915->params.enable_guc = 0;
     31		return;
     32	}
     33
     34	/* Don't enable GuC/HuC on older Gen12 platforms */
     35	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
     36		i915->params.enable_guc = 0;
     37		return;
     38	}
     39
     40	/* Intermediate platforms are HuC authentication only */
     41	if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
     42		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
     43		return;
     44	}
     45
     46	/* Default: enable HuC authentication and GuC submission */
     47	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
     48}
     49
     50/* Reset GuC providing us with fresh state for both GuC and HuC.
     51 */
     52static int __intel_uc_reset_hw(struct intel_uc *uc)
     53{
     54	struct intel_gt *gt = uc_to_gt(uc);
     55	int ret;
     56	u32 guc_status;
     57
     58	ret = i915_inject_probe_error(gt->i915, -ENXIO);
     59	if (ret)
     60		return ret;
     61
     62	ret = intel_reset_guc(gt);
     63	if (ret) {
     64		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
     65		return ret;
     66	}
     67
     68	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
     69	WARN(!(guc_status & GS_MIA_IN_RESET),
     70	     "GuC status: 0x%x, MIA core expected to be in reset\n",
     71	     guc_status);
     72
     73	return ret;
     74}
     75
     76static void __confirm_options(struct intel_uc *uc)
     77{
     78	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
     79
     80	drm_dbg(&i915->drm,
     81		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
     82		i915->params.enable_guc,
     83		str_yes_no(intel_uc_wants_guc(uc)),
     84		str_yes_no(intel_uc_wants_guc_submission(uc)),
     85		str_yes_no(intel_uc_wants_huc(uc)),
     86		str_yes_no(intel_uc_wants_guc_slpc(uc)));
     87
     88	if (i915->params.enable_guc == 0) {
     89		GEM_BUG_ON(intel_uc_wants_guc(uc));
     90		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
     91		GEM_BUG_ON(intel_uc_wants_huc(uc));
     92		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
     93		return;
     94	}
     95
     96	if (!intel_uc_supports_guc(uc))
     97		drm_info(&i915->drm,
     98			 "Incompatible option enable_guc=%d - %s\n",
     99			 i915->params.enable_guc, "GuC is not supported!");
    100
    101	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
    102	    !intel_uc_supports_huc(uc))
    103		drm_info(&i915->drm,
    104			 "Incompatible option enable_guc=%d - %s\n",
    105			 i915->params.enable_guc, "HuC is not supported!");
    106
    107	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
    108	    !intel_uc_supports_guc_submission(uc))
    109		drm_info(&i915->drm,
    110			 "Incompatible option enable_guc=%d - %s\n",
    111			 i915->params.enable_guc, "GuC submission is N/A");
    112
    113	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
    114		drm_info(&i915->drm,
    115			 "Incompatible option enable_guc=%d - %s\n",
    116			 i915->params.enable_guc, "undocumented flag");
    117}
    118
    119void intel_uc_init_early(struct intel_uc *uc)
    120{
    121	uc_expand_default_options(uc);
    122
    123	intel_guc_init_early(&uc->guc);
    124	intel_huc_init_early(&uc->huc);
    125
    126	__confirm_options(uc);
    127
    128	if (intel_uc_wants_guc(uc))
    129		uc->ops = &uc_ops_on;
    130	else
    131		uc->ops = &uc_ops_off;
    132}
    133
/* Late (post GT init) setup; currently only the GuC has such a phase. */
void intel_uc_init_late(struct intel_uc *uc)
{
	intel_guc_init_late(&uc->guc);
}
    138
/* Intentionally empty: no uC state to release at driver late-release time. */
void intel_uc_driver_late_release(struct intel_uc *uc)
{
}
    142
/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	/* Only the GuC send/recv registers are needed this early. */
	intel_guc_init_send_regs(&uc->guc);
}
    154
    155static void __uc_capture_load_err_log(struct intel_uc *uc)
    156{
    157	struct intel_guc *guc = &uc->guc;
    158
    159	if (guc->log.vma && !uc->load_err_log)
    160		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
    161}
    162
    163static void __uc_free_load_err_log(struct intel_uc *uc)
    164{
    165	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
    166
    167	if (log)
    168		i915_gem_object_put(log);
    169}
    170
/* Driver removal: tear down HW state, free uC objects and any saved log. */
void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}
    177
/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with guc is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	/* Writing 0 discards any events logged in the scratch register. */
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}
    188
    189static void guc_get_mmio_msg(struct intel_guc *guc)
    190{
    191	u32 val;
    192
    193	spin_lock_irq(&guc->irq_lock);
    194
    195	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
    196	guc->mmio_msg |= val & guc->msg_enabled_mask;
    197
    198	/*
    199	 * clear all events, including the ones we're not currently servicing,
    200	 * to make sure we don't try to process a stale message if we enable
    201	 * handling of more events later.
    202	 */
    203	guc_clear_mmio_msg(guc);
    204
    205	spin_unlock_irq(&guc->irq_lock);
    206}
    207
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	/* irq_lock guards mmio_msg against the interrupt-time writers */
	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}
    220
/*
 * Bring up host <-> GuC communication: enable the CT channel, replay any
 * events that were logged via MMIO or CT while it was down, then enable
 * interrupts. Returns 0 on success or a negative error code.
 */
static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	/* Fault-injection point for probe error-handling tests. */
	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(&gt->irq_lock);

	drm_dbg(&i915->drm, "GuC communication enabled\n");

	return 0;
}
    252
/*
 * Tear down host <-> GuC communication in the reverse order of
 * guc_enable_communication(): interrupts first, then the CT channel.
 */
static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/*
	 * Events generated during or after CT disable are logged by the GuC
	 * via mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	drm_dbg(&i915->drm, "GuC communication disabled\n");
}
    278
    279static void __uc_fetch_firmwares(struct intel_uc *uc)
    280{
    281	int err;
    282
    283	GEM_BUG_ON(!intel_uc_wants_guc(uc));
    284
    285	err = intel_uc_fw_fetch(&uc->guc.fw);
    286	if (err) {
    287		/* Make sure we transition out of transient "SELECTED" state */
    288		if (intel_uc_wants_huc(uc)) {
    289			drm_dbg(&uc_to_gt(uc)->i915->drm,
    290				"Failed to fetch GuC: %d disabling HuC\n", err);
    291			intel_uc_fw_change_status(&uc->huc.fw,
    292						  INTEL_UC_FIRMWARE_ERROR);
    293		}
    294
    295		return;
    296	}
    297
    298	if (intel_uc_wants_huc(uc))
    299		intel_uc_fw_fetch(&uc->huc.fw);
    300}
    301
/* Release fetched firmware blobs in reverse fetch order: HuC, then GuC. */
static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
    307
    308static int __uc_init(struct intel_uc *uc)
    309{
    310	struct intel_guc *guc = &uc->guc;
    311	struct intel_huc *huc = &uc->huc;
    312	int ret;
    313
    314	GEM_BUG_ON(!intel_uc_wants_guc(uc));
    315
    316	if (!intel_uc_uses_guc(uc))
    317		return 0;
    318
    319	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
    320		return -ENOMEM;
    321
    322	ret = intel_guc_init(guc);
    323	if (ret)
    324		return ret;
    325
    326	if (intel_uc_uses_huc(uc)) {
    327		ret = intel_huc_init(huc);
    328		if (ret)
    329			goto out_guc;
    330	}
    331
    332	return 0;
    333
    334out_guc:
    335	intel_guc_fini(guc);
    336	return ret;
    337}
    338
/* Teardown counterpart of __uc_init: HuC first, then GuC. */
static void __uc_fini(struct intel_uc *uc)
{
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}
    344
    345static int __uc_sanitize(struct intel_uc *uc)
    346{
    347	struct intel_guc *guc = &uc->guc;
    348	struct intel_huc *huc = &uc->huc;
    349
    350	GEM_BUG_ON(!intel_uc_supports_guc(uc));
    351
    352	intel_huc_sanitize(huc);
    353	intel_guc_sanitize(guc);
    354
    355	return __intel_uc_reset_hw(uc);
    356}
    357
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	/* A zero base or size means WOPCM partitioning failed earlier. */
	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	/* base and size must be non-zero and fit entirely in their masks. */
	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	/* Fault-injection point for probe error-handling tests. */
	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	/* Program the size and lock it; write_and_verify reads it back. */
	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	/* Program the offset, HuC loading agent and the valid bit. */
	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	/* Dump both registers to aid diagnosing the verify failure. */
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}
    411
    412static bool uc_is_wopcm_locked(struct intel_uc *uc)
    413{
    414	struct intel_gt *gt = uc_to_gt(uc);
    415	struct intel_uncore *uncore = gt->uncore;
    416
    417	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
    418	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
    419}
    420
    421static int __uc_check_hw(struct intel_uc *uc)
    422{
    423	if (!intel_uc_supports_guc(uc))
    424		return 0;
    425
    426	/*
    427	 * We can silently continue without GuC only if it was never enabled
    428	 * before on this system after reboot, otherwise we risk GPU hangs.
    429	 * To check if GuC was loaded before we look at WOPCM registers.
    430	 */
    431	if (uc_is_wopcm_locked(uc))
    432		return -EIO;
    433
    434	return 0;
    435}
    436
/* Log the type, path and found version of a fetched firmware blob. */
static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
		 intel_uc_fw_type_repr(fw->type), fw->path,
		 fw->major_ver_found, fw->minor_ver_found);
}
    445
/*
 * Full uC hardware bring-up: program WOPCM, upload HuC/GuC firmware (with
 * retries on Gen9), enable communication, authenticate HuC and optionally
 * enable GuC submission and SLPC. On failure without a hard error we fall
 * back to running without GuC; hard errors return -EIO to keep KMS alive.
 */
static int __uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(uc, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(uc, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		/*
		 * Missing firmware is an error only if GuC was already used,
		 * explicitly requested, or required for submission; otherwise
		 * fall through to the "run without GuC" path (ret == 0).
		 */
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	/* Raise GT frequency to speed up the firmware load. */
	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_enable(guc);

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	drm_info(&i915->drm, "GuC submission %s\n",
		 str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	drm_info(&i915->drm, "GuC SLPC %s\n",
		 str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	__uc_sanitize(uc);

	if (!ret) {
		drm_notice(&i915->drm, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}
    557
    558static void __uc_fini_hw(struct intel_uc *uc)
    559{
    560	struct intel_guc *guc = &uc->guc;
    561
    562	if (!intel_guc_is_fw_running(guc))
    563		return;
    564
    565	if (intel_uc_uses_guc_submission(uc))
    566		intel_guc_submission_disable(guc);
    567
    568	__uc_sanitize(uc);
    569}
    570
    571/**
    572 * intel_uc_reset_prepare - Prepare for reset
    573 * @uc: the intel_uc structure
    574 *
    575 * Preparing for full gpu reset.
    576 */
    577void intel_uc_reset_prepare(struct intel_uc *uc)
    578{
    579	struct intel_guc *guc = &uc->guc;
    580
    581	uc->reset_in_progress = true;
    582
    583	/* Nothing to do if GuC isn't supported */
    584	if (!intel_uc_supports_guc(uc))
    585		return;
    586
    587	/* Firmware expected to be running when this function is called */
    588	if (!intel_guc_is_ready(guc))
    589		goto sanitize;
    590
    591	if (intel_uc_uses_guc_submission(uc))
    592		intel_guc_submission_reset_prepare(guc);
    593
    594sanitize:
    595	__uc_sanitize(uc);
    596}
    597
/* Per-engine reset hook; propagates the stalled mask to GuC submission. */
void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware can not be running when this function is called  */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset(guc, stalled);
}
    606
/* Post-reset hook; clears the in-progress flag set by intel_uc_reset_prepare. */
void intel_uc_reset_finish(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = false;

	/* Firmware expected to be running when this function is called */
	if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_finish(guc);
}
    617
/* Cancel all outstanding requests tracked by GuC submission, if in use. */
void intel_uc_cancel_requests(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware can not be running when this function is called  */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_cancel_requests(guc);
}
    626
/*
 * Runtime-suspend path: drain outstanding G2H traffic (bounded wait), then
 * shut down host <-> GuC communication. No-op if the GuC isn't up.
 */
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc))
		return;

	/*
	 * Wait for any outstanding CTB before tearing down communication /w the
	 * GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	/* Warn (don't fail) if messages are still pending after the timeout. */
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}
    645
    646void intel_uc_suspend(struct intel_uc *uc)
    647{
    648	struct intel_guc *guc = &uc->guc;
    649	intel_wakeref_t wakeref;
    650	int err;
    651
    652	if (!intel_guc_is_ready(guc))
    653		return;
    654
    655	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
    656		err = intel_guc_suspend(guc);
    657		if (err)
    658			DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
    659	}
    660}
    661
    662static int __uc_resume(struct intel_uc *uc, bool enable_communication)
    663{
    664	struct intel_guc *guc = &uc->guc;
    665	struct intel_gt *gt = guc_to_gt(guc);
    666	int err;
    667
    668	if (!intel_guc_is_fw_running(guc))
    669		return 0;
    670
    671	/* Make sure we enable communication if and only if it's disabled */
    672	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
    673
    674	if (enable_communication)
    675		guc_enable_communication(guc);
    676
    677	/* If we are only resuming GuC communication but not reloading
    678	 * GuC, we need to ensure the ARAT timer interrupt is enabled
    679	 * again. In case of GuC reload, it is enabled during SLPC enable.
    680	 */
    681	if (enable_communication && intel_uc_uses_guc_slpc(uc))
    682		intel_guc_pm_intrmsk_enable(gt);
    683
    684	err = intel_guc_resume(guc);
    685	if (err) {
    686		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
    687		return err;
    688	}
    689
    690	return 0;
    691}
    692
int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}
    701
int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}
    710
/* Ops used when GuC is not wanted: only verify the HW is GuC-free. */
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
};
    714
/* Full ops table used when GuC usage is requested. */
static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,
};