cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_guc_slpc.c (17953B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/drm_cache.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, the corresponding value will be
	 * read and applied by SLPC.
	 */
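	/* One valid-bit per parameter, packed 32 to a u32 */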
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

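	/*
	 * The GuC updates global_state in the shared page; flush stale
	 * CPU cachelines so we read its latest write.
	 */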
	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

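	/* Positive return values carry response data, which is unexpected here */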
	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		i915_probe_error(i915, "Failed to query task state (%pe)\n",
				 ERR_PTR(ret));

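	/* Flush CPU cachelines so later decodes read the GuC's update */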
	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
				 id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function differs from intel_guc_slpc_set_min_freq() in that
	 * the softlimit is not updated: it is used to change min freq only
	 * temporarily, for example during a waitboost. The caller is
	 * responsible for checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			i915_probe_error(i915, "Unable to force min freq to %u: %d",
					 freq, ret);
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than the current max. But it
	 * will certainly be limited by RP0. An error
	 * setting the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

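	/* Shared data lives in a GGTT-pinned buffer, CPU-mapped for the host */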
	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		i915_probe_error(i915,
				 "Failed to allocate SLPC struct (err=%pe)\n",
				 ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

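	/*
	 * The reset was accepted; poll the shared state until SLPC
	 * reports that it is up and running.
	 */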
	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			i915_probe_error(i915, "SLPC not enabled! State = %s\n",
					 slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

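	/* Scale the field from hardware ratio units to MHz */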
	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				  data->task_state_data.freq) *
				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				  data->task_state_data.freq) *
				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

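	/* Stay within fused platform limits and don't drop below the min softlimit */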
	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

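	/* Stay within fused platform limits and don't exceed the max softlimit */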
	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {

		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	mutex_unlock(&slpc->lock);

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is set up by RPS code
	 * when host-based turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

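/*
 * When ignoring the efficient frequency, also pin the min param to the
 * platform minimum; when honouring it again, remove both overrides.
 */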
static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

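	/* Boost freq defaults to RP0 unless already set by the user */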
	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending a reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	return 0;
}

int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire, so a
	 * set_param failure here need not be propagated.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

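	/* Hold a runtime PM wakeref while querying the GuC */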
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}