cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_smu.c (78412B)


      1/*
      2 * Copyright 2019 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 */
     22
     23#define SWSMU_CODE_LAYER_L1
     24
     25#include <linux/firmware.h>
     26#include <linux/pci.h>
     27
     28#include "amdgpu.h"
     29#include "amdgpu_smu.h"
     30#include "smu_internal.h"
     31#include "atom.h"
     32#include "arcturus_ppt.h"
     33#include "navi10_ppt.h"
     34#include "sienna_cichlid_ppt.h"
     35#include "renoir_ppt.h"
     36#include "vangogh_ppt.h"
     37#include "aldebaran_ppt.h"
     38#include "yellow_carp_ppt.h"
     39#include "cyan_skillfish_ppt.h"
     40#include "smu_v13_0_0_ppt.h"
     41#include "smu_v13_0_4_ppt.h"
     42#include "smu_v13_0_5_ppt.h"
     43#include "smu_v13_0_7_ppt.h"
     44#include "amd_pcie.h"
     45
     46/*
     47 * DO NOT use these for err/warn/info/debug messages.
     48 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
     49 * They are more MGPU friendly.
     50 */
     51#undef pr_err
     52#undef pr_warn
     53#undef pr_info
     54#undef pr_debug
     55
     56static const struct amd_pm_funcs swsmu_pm_funcs;
     57static int smu_force_smuclk_levels(struct smu_context *smu,
     58				   enum smu_clk_type clk_type,
     59				   uint32_t mask);
     60static int smu_handle_task(struct smu_context *smu,
     61			   enum amd_dpm_forced_level level,
     62			   enum amd_pp_task task_id);
     63static int smu_reset(struct smu_context *smu);
     64static int smu_set_fan_speed_pwm(void *handle, u32 speed);
     65static int smu_set_fan_control_mode(void *handle, u32 value);
     66static int smu_set_power_limit(void *handle, uint32_t limit);
     67static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
     68static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
     69
     70static int smu_sys_get_pp_feature_mask(void *handle,
     71				       char *buf)
     72{
     73	struct smu_context *smu = handle;
     74
     75	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
     76		return -EOPNOTSUPP;
     77
     78	return smu_get_pp_feature_mask(smu, buf);
     79}
     80
     81static int smu_sys_set_pp_feature_mask(void *handle,
     82				       uint64_t new_mask)
     83{
     84	struct smu_context *smu = handle;
     85
     86	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
     87		return -EOPNOTSUPP;
     88
     89	return smu_set_pp_feature_mask(smu, new_mask);
     90}
     91
     92int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
     93{
     94	if (!smu->ppt_funcs->get_gfx_off_status)
     95		return -EINVAL;
     96
     97	*value = smu_get_gfx_off_status(smu);
     98
     99	return 0;
    100}
    101
    102int smu_set_soft_freq_range(struct smu_context *smu,
    103			    enum smu_clk_type clk_type,
    104			    uint32_t min,
    105			    uint32_t max)
    106{
    107	int ret = 0;
    108
    109	if (smu->ppt_funcs->set_soft_freq_limited_range)
    110		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
    111								  clk_type,
    112								  min,
    113								  max);
    114
    115	return ret;
    116}
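       /*
        * Typical use (hypothetical values, for illustration only): clamp
        * GFXCLK into a fixed window, with both bounds given in MHz:
        *
        *     ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, 800, 1200);
        */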
    117
    118int smu_get_dpm_freq_range(struct smu_context *smu,
    119			   enum smu_clk_type clk_type,
    120			   uint32_t *min,
    121			   uint32_t *max)
    122{
    123	int ret = -ENOTSUPP;
    124
    125	if (!min && !max)
    126		return -EINVAL;
    127
    128	if (smu->ppt_funcs->get_dpm_ultimate_freq)
    129		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
    130							    clk_type,
    131							    min,
    132							    max);
    133
    134	return ret;
    135}
    136
    137static u32 smu_get_mclk(void *handle, bool low)
    138{
    139	struct smu_context *smu = handle;
    140	uint32_t clk_freq;
    141	int ret = 0;
    142
    143	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
    144				     low ? &clk_freq : NULL,
    145				     !low ? &clk_freq : NULL);
    146	if (ret)
    147		return 0;
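       	/* clk_freq is in MHz; multiplying by 100 yields the 10 kHz units
       	 * the legacy powerplay query interfaces are assumed to expect */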
    148	return clk_freq * 100;
    149}
    150
    151static u32 smu_get_sclk(void *handle, bool low)
    152{
    153	struct smu_context *smu = handle;
    154	uint32_t clk_freq;
    155	int ret = 0;
    156
    157	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
    158				     low ? &clk_freq : NULL,
    159				     !low ? &clk_freq : NULL);
    160	if (ret)
    161		return 0;
    162	return clk_freq * 100;
    163}
    164
    165static int smu_dpm_set_vcn_enable(struct smu_context *smu,
    166				  bool enable)
    167{
    168	struct smu_power_context *smu_power = &smu->smu_power;
    169	struct smu_power_gate *power_gate = &smu_power->power_gate;
    170	int ret = 0;
    171
    172	if (!smu->ppt_funcs->dpm_set_vcn_enable)
    173		return 0;
    174
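       	/* skip if VCN is already in the requested state (gated == !enable) */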
    175	if (atomic_read(&power_gate->vcn_gated) ^ enable)
    176		return 0;
    177
    178	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
    179	if (!ret)
    180		atomic_set(&power_gate->vcn_gated, !enable);
    181
    182	return ret;
    183}
    184
    185static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
    186				   bool enable)
    187{
    188	struct smu_power_context *smu_power = &smu->smu_power;
    189	struct smu_power_gate *power_gate = &smu_power->power_gate;
    190	int ret = 0;
    191
    192	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
    193		return 0;
    194
    195	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
    196		return 0;
    197
    198	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
    199	if (!ret)
    200		atomic_set(&power_gate->jpeg_gated, !enable);
    201
    202	return ret;
    203}
    204
    205/**
    206 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
    207 *
    208 * @handle:        smu_context pointer
    209 * @block_type: the IP block to power gate/ungate
    210 * @gate:       to power gate if true, ungate otherwise
    211 *
    212 * This API uses no smu->mutex lock protection due to:
    213 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
    214 *    in which case race-free access is guaranteed by the caller.
    215 * 2. Or it is called on a user request to set power_dpm_force_performance_level.
    216 *    In that case, the smu->mutex lock protection is already enforced on
    217 *    the parent API smu_force_performance_level of the call path.
    218 */
    219static int smu_dpm_set_power_gate(void *handle,
    220				  uint32_t block_type,
    221				  bool gate)
    222{
    223	struct smu_context *smu = handle;
    224	int ret = 0;
    225
    226	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
    227		dev_WARN(smu->adev->dev,
    228			 "SMU uninitialized but power %s requested for %u!\n",
    229			 gate ? "gate" : "ungate", block_type);
    230		return -EOPNOTSUPP;
    231	}
    232
    233	switch (block_type) {
    234	/*
    235	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
    236	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
    237	 */
    238	case AMD_IP_BLOCK_TYPE_UVD:
    239	case AMD_IP_BLOCK_TYPE_VCN:
    240		ret = smu_dpm_set_vcn_enable(smu, !gate);
    241		if (ret)
    242			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
    243				gate ? "gate" : "ungate");
    244		break;
    245	case AMD_IP_BLOCK_TYPE_GFX:
    246		ret = smu_gfx_off_control(smu, gate);
    247		if (ret)
    248			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
    249				gate ? "enable" : "disable");
    250		break;
    251	case AMD_IP_BLOCK_TYPE_SDMA:
    252		ret = smu_powergate_sdma(smu, gate);
    253		if (ret)
    254			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
    255				gate ? "gate" : "ungate");
    256		break;
    257	case AMD_IP_BLOCK_TYPE_JPEG:
    258		ret = smu_dpm_set_jpeg_enable(smu, !gate);
    259		if (ret)
    260			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
    261				gate ? "gate" : "ungate");
    262		break;
    263	default:
    264		dev_err(smu->adev->dev, "Unsupported block type!\n");
    265		return -EINVAL;
    266	}
    267
    268	return ret;
    269}
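       /*
        * Illustrative call path (not taken from this file): an IP block such
        * as VCN ungates itself through the pm layer, which lands here:
        *
        *     amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
        */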
    270
    271/**
    272 * smu_set_user_clk_dependencies - set user profile clock dependencies
    273 *
    274 * @smu:	smu_context pointer
    275 * @clk:	enum smu_clk_type type
    276 *
    277 * Enable/Disable the clock dependency for the @clk type.
    278 */
    279static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
    280{
    281	if (smu->adev->in_suspend)
    282		return;
    283
    284	if (clk == SMU_MCLK) {
    285		smu->user_dpm_profile.clk_dependency = 0;
    286		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
    287	} else if (clk == SMU_FCLK) {
    288		/* MCLK takes precedence over FCLK */
    289		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
    290			return;
    291
    292		smu->user_dpm_profile.clk_dependency = 0;
    293		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
    294	} else if (clk == SMU_SOCCLK) {
    295		/* MCLK takes precedence over SOCCLK */
    296		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
    297			return;
    298
    299		smu->user_dpm_profile.clk_dependency = 0;
    300		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
    301	} else
    302		/* Add clk dependencies here, if any */
    303		return;
    304}
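       /*
        * Example: after a user forces MCLK, clk_dependency holds
        * BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so the saved FCLK/SOCCLK masks are
        * skipped when smu_restore_dpm_user_profile() replays user settings.
        */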
    305
    306/**
    307 * smu_restore_dpm_user_profile - reinstate user dpm profile
    308 *
    309 * @smu:	smu_context pointer
    310 *
    311 * Restore the saved user power configurations, including power limit,
    312 * clock frequencies, fan control mode and fan speed.
    313 */
    314static void smu_restore_dpm_user_profile(struct smu_context *smu)
    315{
    316	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
    317	int ret = 0;
    318
    319	if (!smu->adev->in_suspend)
    320		return;
    321
    322	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
    323		return;
    324
    325	/* Enable restore flag */
    326	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
    327
    328	/* set the user dpm power limit */
    329	if (smu->user_dpm_profile.power_limit) {
    330		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
    331		if (ret)
    332			dev_err(smu->adev->dev, "Failed to set power limit value\n");
    333	}
    334
    335	/* set the user dpm clock configurations */
    336	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
    337		enum smu_clk_type clk_type;
    338
    339		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
    340			/*
    341			 * Iterate over smu clk type and force the saved user clk
    342			 * configs, skip if clock dependency is enabled
    343			 */
    344			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
    345					smu->user_dpm_profile.clk_mask[clk_type]) {
    346				ret = smu_force_smuclk_levels(smu, clk_type,
    347						smu->user_dpm_profile.clk_mask[clk_type]);
    348				if (ret)
    349					dev_err(smu->adev->dev,
    350						"Failed to set clock type = %d\n", clk_type);
    351			}
    352		}
    353	}
    354
    355	/* set the user dpm fan configurations */
    356	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
    357	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
    358		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
    359		if (ret && ret != -EOPNOTSUPP) {
    360			smu->user_dpm_profile.fan_speed_pwm = 0;
    361			smu->user_dpm_profile.fan_speed_rpm = 0;
    362			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
    363			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
    364		}
    365
    366		if (smu->user_dpm_profile.fan_speed_pwm) {
    367			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
    368			if (ret && ret != -EOPNOTSUPP)
    369				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
    370		}
    371
    372		if (smu->user_dpm_profile.fan_speed_rpm) {
    373			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
    374			if (ret && ret != -EOPNOTSUPP)
    375				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
    376		}
    377	}
    378
    379	/* Restore user customized OD settings */
    380	if (smu->user_dpm_profile.user_od) {
    381		if (smu->ppt_funcs->restore_user_od_settings) {
    382			ret = smu->ppt_funcs->restore_user_od_settings(smu);
    383			if (ret)
    384				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
    385		}
    386	}
    387
    388	/* Disable restore flag */
    389	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
    390}
    391
    392static int smu_get_power_num_states(void *handle,
    393				    struct pp_states_info *state_info)
    394{
    395	if (!state_info)
    396		return -EINVAL;
    397
    398	/* not support power state */
    399	memset(state_info, 0, sizeof(struct pp_states_info));
    400	state_info->nums = 1;
    401	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
    402
    403	return 0;
    404}
    405
    406bool is_support_sw_smu(struct amdgpu_device *adev)
    407{
    408	/* vega20 is 11.0.2, but it's supported via the powerplay code */
    409	if (adev->asic_type == CHIP_VEGA20)
    410		return false;
    411
    412	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
    413		return true;
    414
    415	return false;
    416}
    417
    418bool is_support_cclk_dpm(struct amdgpu_device *adev)
    419{
    420	struct smu_context *smu = adev->powerplay.pp_handle;
    421
    422	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
    423		return false;
    424
    425	return true;
    426}
    427
    428
    429static int smu_sys_get_pp_table(void *handle,
    430				char **table)
    431{
    432	struct smu_context *smu = handle;
    433	struct smu_table_context *smu_table = &smu->smu_table;
    434
    435	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
    436		return -EOPNOTSUPP;
    437
    438	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
    439		return -EINVAL;
    440
    441	if (smu_table->hardcode_pptable)
    442		*table = smu_table->hardcode_pptable;
    443	else
    444		*table = smu_table->power_play_table;
    445
    446	return smu_table->power_play_table_size;
    447}
    448
    449static int smu_sys_set_pp_table(void *handle,
    450				const char *buf,
    451				size_t size)
    452{
    453	struct smu_context *smu = handle;
    454	struct smu_table_context *smu_table = &smu->smu_table;
    455	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
    456	int ret = 0;
    457
    458	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
    459		return -EOPNOTSUPP;
    460
    461	if (header->usStructureSize != size) {
    462		dev_err(smu->adev->dev, "pp table size does not match!\n");
    463		return -EIO;
    464	}
    465
    466	if (!smu_table->hardcode_pptable) {
    467		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
    468		if (!smu_table->hardcode_pptable)
    469			return -ENOMEM;
    470	}
    471
    472	memcpy(smu_table->hardcode_pptable, buf, size);
    473	smu_table->power_play_table = smu_table->hardcode_pptable;
    474	smu_table->power_play_table_size = size;
    475
    476	/*
    477	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
    478	 * skipped) may be needed for custom pptable uploading.
    479	 */
    480	smu->uploading_custom_pp_table = true;
    481
    482	ret = smu_reset(smu);
    483	if (ret)
    484		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
    485
    486	smu->uploading_custom_pp_table = false;
    487
    488	return ret;
    489}
    490
    491static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
    492{
    493	struct smu_feature *feature = &smu->smu_feature;
    494	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
    495	int ret = 0;
    496
    497	/*
    498	 * With SCPM enabled, setting the allowed feature masks (via
    499	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
    500	 * That means there is no way to let the PMFW know the settings below.
    501	 * Thus, we just assume all the features are allowed under
    502	 * such scenario.
    503	 */
    504	if (smu->adev->scpm_enabled) {
    505		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
    506		return 0;
    507	}
    508
    509	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
    510
    511	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
    512					     SMU_FEATURE_MAX/32);
    513	if (ret)
    514		return ret;
    515
    516	bitmap_or(feature->allowed, feature->allowed,
    517		      (unsigned long *)allowed_feature_mask,
    518		      feature->feature_num);
    519
    520	return ret;
    521}
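       /*
        * The PMFW feature mask is a 64-bit value exchanged as 32-bit words,
        * hence the SMU_FEATURE_MAX/32 sizing of allowed_feature_mask above.
        */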
    522
    523static int smu_set_funcs(struct amdgpu_device *adev)
    524{
    525	struct smu_context *smu = adev->powerplay.pp_handle;
    526
    527	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
    528		smu->od_enabled = true;
    529
    530	switch (adev->ip_versions[MP1_HWIP][0]) {
    531	case IP_VERSION(11, 0, 0):
    532	case IP_VERSION(11, 0, 5):
    533	case IP_VERSION(11, 0, 9):
    534		navi10_set_ppt_funcs(smu);
    535		break;
    536	case IP_VERSION(11, 0, 7):
    537	case IP_VERSION(11, 0, 11):
    538	case IP_VERSION(11, 0, 12):
    539	case IP_VERSION(11, 0, 13):
    540		sienna_cichlid_set_ppt_funcs(smu);
    541		break;
    542	case IP_VERSION(12, 0, 0):
    543	case IP_VERSION(12, 0, 1):
    544		renoir_set_ppt_funcs(smu);
    545		break;
    546	case IP_VERSION(11, 5, 0):
    547		vangogh_set_ppt_funcs(smu);
    548		break;
    549	case IP_VERSION(13, 0, 1):
    550	case IP_VERSION(13, 0, 3):
    551	case IP_VERSION(13, 0, 8):
    552		yellow_carp_set_ppt_funcs(smu);
    553		break;
    554	case IP_VERSION(13, 0, 4):
    555		smu_v13_0_4_set_ppt_funcs(smu);
    556		break;
    557	case IP_VERSION(13, 0, 5):
    558		smu_v13_0_5_set_ppt_funcs(smu);
    559		break;
    560	case IP_VERSION(11, 0, 8):
    561		cyan_skillfish_set_ppt_funcs(smu);
    562		break;
    563	case IP_VERSION(11, 0, 2):
    564		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
    565		arcturus_set_ppt_funcs(smu);
    566		/* OD is not supported on Arcturus */
    567		smu->od_enabled = false;
    568		break;
    569	case IP_VERSION(13, 0, 2):
    570		aldebaran_set_ppt_funcs(smu);
    571		/* Enable pp_od_clk_voltage node */
    572		smu->od_enabled = true;
    573		break;
    574	case IP_VERSION(13, 0, 0):
    575		smu_v13_0_0_set_ppt_funcs(smu);
    576		break;
    577	case IP_VERSION(13, 0, 7):
    578		smu_v13_0_7_set_ppt_funcs(smu);
    579		break;
    580	default:
    581		return -EINVAL;
    582	}
    583
    584	return 0;
    585}
    586
    587static int smu_early_init(void *handle)
    588{
    589	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    590	struct smu_context *smu;
    591
    592	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
    593	if (!smu)
    594		return -ENOMEM;
    595
    596	smu->adev = adev;
    597	smu->pm_enabled = !!amdgpu_dpm;
    598	smu->is_apu = false;
    599	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
    600	smu->smu_baco.platform_support = false;
    601	smu->user_dpm_profile.fan_mode = -1;
    602
    603	mutex_init(&smu->message_lock);
    604
    605	adev->powerplay.pp_handle = smu;
    606	adev->powerplay.pp_funcs = &swsmu_pm_funcs;
    607
    608	return smu_set_funcs(adev);
    609}
    610
    611static int smu_set_default_dpm_table(struct smu_context *smu)
    612{
    613	struct smu_power_context *smu_power = &smu->smu_power;
    614	struct smu_power_gate *power_gate = &smu_power->power_gate;
    615	int vcn_gate, jpeg_gate;
    616	int ret = 0;
    617
    618	if (!smu->ppt_funcs->set_default_dpm_table)
    619		return 0;
    620
    621	vcn_gate = atomic_read(&power_gate->vcn_gated);
    622	jpeg_gate = atomic_read(&power_gate->jpeg_gated);
    623
    624	ret = smu_dpm_set_vcn_enable(smu, true);
    625	if (ret)
    626		return ret;
    627
    628	ret = smu_dpm_set_jpeg_enable(smu, true);
    629	if (ret)
    630		goto err_out;
    631
    632	ret = smu->ppt_funcs->set_default_dpm_table(smu);
    633	if (ret)
    634		dev_err(smu->adev->dev,
    635			"Failed to setup default dpm clock tables!\n");
    636
    637	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
    638err_out:
    639	smu_dpm_set_vcn_enable(smu, !vcn_gate);
    640	return ret;
    641}
    642
    643static int smu_apply_default_config_table_settings(struct smu_context *smu)
    644{
    645	struct amdgpu_device *adev = smu->adev;
    646	int ret = 0;
    647
    648	ret = smu_get_default_config_table_settings(smu,
    649						    &adev->pm.config_table);
    650	if (ret)
    651		return ret;
    652
    653	return smu_set_config_table(smu, &adev->pm.config_table);
    654}
    655
    656static int smu_late_init(void *handle)
    657{
    658	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    659	struct smu_context *smu = adev->powerplay.pp_handle;
    660	int ret = 0;
    661
    662	smu_set_fine_grain_gfx_freq_parameters(smu);
    663
    664	if (!smu->pm_enabled)
    665		return 0;
    666
    667	ret = smu_post_init(smu);
    668	if (ret) {
    669		dev_err(adev->dev, "Failed to post smu init!\n");
    670		return ret;
    671	}
    672
    673	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
    674	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
    675		return 0;
    676
    677	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
    678		ret = smu_set_default_od_settings(smu);
    679		if (ret) {
    680			dev_err(adev->dev, "Failed to setup default OD settings!\n");
    681			return ret;
    682		}
    683	}
    684
    685	ret = smu_populate_umd_state_clk(smu);
    686	if (ret) {
    687		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
    688		return ret;
    689	}
    690
    691	ret = smu_get_asic_power_limits(smu,
    692					&smu->current_power_limit,
    693					&smu->default_power_limit,
    694					&smu->max_power_limit);
    695	if (ret) {
    696		dev_err(adev->dev, "Failed to get asic power limits!\n");
    697		return ret;
    698	}
    699
    700	if (!amdgpu_sriov_vf(adev))
    701		smu_get_unique_id(smu);
    702
    703	smu_get_fan_parameters(smu);
    704
    705	smu_handle_task(smu,
    706			smu->smu_dpm.dpm_level,
    707			AMD_PP_TASK_COMPLETE_INIT);
    708
    709	ret = smu_apply_default_config_table_settings(smu);
    710	if (ret && (ret != -EOPNOTSUPP)) {
    711		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
    712		return ret;
    713	}
    714
    715	smu_restore_dpm_user_profile(smu);
    716
    717	return 0;
    718}
    719
    720static int smu_init_fb_allocations(struct smu_context *smu)
    721{
    722	struct amdgpu_device *adev = smu->adev;
    723	struct smu_table_context *smu_table = &smu->smu_table;
    724	struct smu_table *tables = smu_table->tables;
    725	struct smu_table *driver_table = &(smu_table->driver_table);
    726	uint32_t max_table_size = 0;
    727	int ret, i;
    728
    729	/* VRAM allocation for tool table */
    730	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
    731		ret = amdgpu_bo_create_kernel(adev,
    732					      tables[SMU_TABLE_PMSTATUSLOG].size,
    733					      tables[SMU_TABLE_PMSTATUSLOG].align,
    734					      tables[SMU_TABLE_PMSTATUSLOG].domain,
    735					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
    736					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
    737					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
    738		if (ret) {
    739			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
    740			return ret;
    741		}
    742	}
    743
    744	/* VRAM allocation for driver table */
    745	for (i = 0; i < SMU_TABLE_COUNT; i++) {
    746		if (tables[i].size == 0)
    747			continue;
    748
    749		if (i == SMU_TABLE_PMSTATUSLOG)
    750			continue;
    751
    752		if (max_table_size < tables[i].size)
    753			max_table_size = tables[i].size;
    754	}
    755
    756	driver_table->size = max_table_size;
    757	driver_table->align = PAGE_SIZE;
    758	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
    759
    760	ret = amdgpu_bo_create_kernel(adev,
    761				      driver_table->size,
    762				      driver_table->align,
    763				      driver_table->domain,
    764				      &driver_table->bo,
    765				      &driver_table->mc_address,
    766				      &driver_table->cpu_addr);
    767	if (ret) {
    768		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
    769		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
    770			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
    771					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
    772					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
    773	}
    774
    775	return ret;
    776}
    777
    778static int smu_fini_fb_allocations(struct smu_context *smu)
    779{
    780	struct smu_table_context *smu_table = &smu->smu_table;
    781	struct smu_table *tables = smu_table->tables;
    782	struct smu_table *driver_table = &(smu_table->driver_table);
    783
    784	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
    785		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
    786				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
    787				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
    788
    789	amdgpu_bo_free_kernel(&driver_table->bo,
    790			      &driver_table->mc_address,
    791			      &driver_table->cpu_addr);
    792
    793	return 0;
    794}
    795
    796/**
    797 * smu_alloc_memory_pool - allocate memory pool in the system memory
    798 *
    799 * @smu: smu_context pointer
    800 *
    801 * This memory pool is used by the SMC; the messages SetSystemVirtualDramAddr
    802 * and DramLogSetDramAddr notify the SMC of its location.
    803 *
    804 * Returns 0 on success, error on failure.
    805 */
    806static int smu_alloc_memory_pool(struct smu_context *smu)
    807{
    808	struct amdgpu_device *adev = smu->adev;
    809	struct smu_table_context *smu_table = &smu->smu_table;
    810	struct smu_table *memory_pool = &smu_table->memory_pool;
    811	uint64_t pool_size = smu->pool_size;
    812	int ret = 0;
    813
    814	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
    815		return ret;
    816
    817	memory_pool->size = pool_size;
    818	memory_pool->align = PAGE_SIZE;
    819	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
    820
    821	switch (pool_size) {
    822	case SMU_MEMORY_POOL_SIZE_256_MB:
    823	case SMU_MEMORY_POOL_SIZE_512_MB:
    824	case SMU_MEMORY_POOL_SIZE_1_GB:
    825	case SMU_MEMORY_POOL_SIZE_2_GB:
    826		ret = amdgpu_bo_create_kernel(adev,
    827					      memory_pool->size,
    828					      memory_pool->align,
    829					      memory_pool->domain,
    830					      &memory_pool->bo,
    831					      &memory_pool->mc_address,
    832					      &memory_pool->cpu_addr);
    833		if (ret)
    834			dev_err(adev->dev, "Memory pool allocation for dramlog failed!\n");
    835		break;
    836	default:
    837		break;
    838	}
    839
    840	return ret;
    841}
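       /*
        * pool_size comes from adev->pm.smu_prv_buffer_size, which is derived
        * from the amdgpu.smu_memory_pool_size module parameter; sizes other
        * than the discrete ones accepted above get no backing allocation.
        */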
    842
    843static int smu_free_memory_pool(struct smu_context *smu)
    844{
    845	struct smu_table_context *smu_table = &smu->smu_table;
    846	struct smu_table *memory_pool = &smu_table->memory_pool;
    847
    848	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
    849		return 0;
    850
    851	amdgpu_bo_free_kernel(&memory_pool->bo,
    852			      &memory_pool->mc_address,
    853			      &memory_pool->cpu_addr);
    854
    855	memset(memory_pool, 0, sizeof(struct smu_table));
    856
    857	return 0;
    858}
    859
    860static int smu_alloc_dummy_read_table(struct smu_context *smu)
    861{
    862	struct smu_table_context *smu_table = &smu->smu_table;
    863	struct smu_table *dummy_read_1_table =
    864			&smu_table->dummy_read_1_table;
    865	struct amdgpu_device *adev = smu->adev;
    866	int ret = 0;
    867
    868	dummy_read_1_table->size = 0x40000;
    869	dummy_read_1_table->align = PAGE_SIZE;
    870	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
    871
    872	ret = amdgpu_bo_create_kernel(adev,
    873				      dummy_read_1_table->size,
    874				      dummy_read_1_table->align,
    875				      dummy_read_1_table->domain,
    876				      &dummy_read_1_table->bo,
    877				      &dummy_read_1_table->mc_address,
    878				      &dummy_read_1_table->cpu_addr);
    879	if (ret)
    880		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
    881
    882	return ret;
    883}
    884
    885static void smu_free_dummy_read_table(struct smu_context *smu)
    886{
    887	struct smu_table_context *smu_table = &smu->smu_table;
    888	struct smu_table *dummy_read_1_table =
    889			&smu_table->dummy_read_1_table;
    890
    891
    892	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
    893			      &dummy_read_1_table->mc_address,
    894			      &dummy_read_1_table->cpu_addr);
    895
    896	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
    897}
    898
    899static int smu_smc_table_sw_init(struct smu_context *smu)
    900{
    901	int ret;
    902
    903	/**
    904	 * Create smu_table structure, and init smc tables such as
    905	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
    906	 */
    907	ret = smu_init_smc_tables(smu);
    908	if (ret) {
    909		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
    910		return ret;
    911	}
    912
    913	/**
    914	 * Create the smu_power_context structure, and allocate the smu_dpm_context
    915	 * and other buffers of the proper size to fill the smu_power_context data.
    916	 */
    917	ret = smu_init_power(smu);
    918	if (ret) {
    919		dev_err(smu->adev->dev, "smu_init_power failed!\n");
    920		return ret;
    921	}
    922
    923	/*
    924	 * allocate vram bos to store smc table contents.
    925	 */
    926	ret = smu_init_fb_allocations(smu);
    927	if (ret)
    928		return ret;
    929
    930	ret = smu_alloc_memory_pool(smu);
    931	if (ret)
    932		return ret;
    933
    934	ret = smu_alloc_dummy_read_table(smu);
    935	if (ret)
    936		return ret;
    937
    938	ret = smu_i2c_init(smu);
    939	if (ret)
    940		return ret;
    941
    942	return 0;
    943}
    944
    945static int smu_smc_table_sw_fini(struct smu_context *smu)
    946{
    947	int ret;
    948
    949	smu_i2c_fini(smu);
    950
    951	smu_free_dummy_read_table(smu);
    952
    953	ret = smu_free_memory_pool(smu);
    954	if (ret)
    955		return ret;
    956
    957	ret = smu_fini_fb_allocations(smu);
    958	if (ret)
    959		return ret;
    960
    961	ret = smu_fini_power(smu);
    962	if (ret) {
    963		dev_err(smu->adev->dev, "smu_fini_power failed!\n");
    964		return ret;
    965	}
    966
    967	ret = smu_fini_smc_tables(smu);
    968	if (ret) {
    969		dev_err(smu->adev->dev, "smu_fini_smc_tables failed!\n");
    970		return ret;
    971	}
    972
    973	return 0;
    974}
    975
    976static void smu_throttling_logging_work_fn(struct work_struct *work)
    977{
    978	struct smu_context *smu = container_of(work, struct smu_context,
    979					       throttling_logging_work);
    980
    981	smu_log_thermal_throttling(smu);
    982}
    983
    984static void smu_interrupt_work_fn(struct work_struct *work)
    985{
    986	struct smu_context *smu = container_of(work, struct smu_context,
    987					       interrupt_work);
    988
    989	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
    990		smu->ppt_funcs->interrupt_work(smu);
    991}
    992
    993static int smu_sw_init(void *handle)
    994{
    995	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    996	struct smu_context *smu = adev->powerplay.pp_handle;
    997	int ret;
    998
    999	smu->pool_size = adev->pm.smu_prv_buffer_size;
   1000	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
   1001	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
   1002	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
   1003
   1004	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
   1005	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
   1006	atomic64_set(&smu->throttle_int_counter, 0);
   1007	smu->watermarks_bitmap = 0;
   1008	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
   1009	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
   1010
   1011	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
   1012	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
   1013
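       	/* smu was kzalloc'ed in smu_early_init(), so workload_prority[] is
       	 * still all zeros here and the boot-up default maps to bit 0 */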
   1014	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
   1015	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
   1016	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
   1017	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
   1018	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
   1019	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
   1020	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
   1021	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
   1022
   1023	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
   1024	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
   1025	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
   1026	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
   1027	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
   1028	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
   1029	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
   1030	smu->display_config = &adev->pm.pm_display_cfg;
   1031
   1032	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
   1033	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
   1034
   1035	ret = smu_init_microcode(smu);
   1036	if (ret) {
   1037		dev_err(adev->dev, "Failed to load smu firmware!\n");
   1038		return ret;
   1039	}
   1040
   1041	ret = smu_smc_table_sw_init(smu);
   1042	if (ret) {
   1043		dev_err(adev->dev, "Failed to sw init smc table!\n");
   1044		return ret;
   1045	}
   1046
   1047	/* get boot_values from vbios to set revision, gfxclk, etc. */
   1048	ret = smu_get_vbios_bootup_values(smu);
   1049	if (ret) {
   1050		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
   1051		return ret;
   1052	}
   1053
   1054	ret = smu_init_pptable_microcode(smu);
   1055	if (ret) {
   1056		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
   1057		return ret;
   1058	}
   1059
   1060	ret = smu_register_irq_handler(smu);
   1061	if (ret) {
   1062		dev_err(adev->dev, "Failed to register smc irq handler!\n");
   1063		return ret;
   1064	}
   1065
   1066	/* If there is no way to query fan control mode, fan control is not supported */
   1067	if (!smu->ppt_funcs->get_fan_control_mode)
   1068		smu->adev->pm.no_fan = true;
   1069
   1070	return 0;
   1071}
   1072
   1073static int smu_sw_fini(void *handle)
   1074{
   1075	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1076	struct smu_context *smu = adev->powerplay.pp_handle;
   1077	int ret;
   1078
   1079	ret = smu_smc_table_sw_fini(smu);
   1080	if (ret) {
   1081		dev_err(adev->dev, "Failed to sw fini smc table!\n");
   1082		return ret;
   1083	}
   1084
   1085	smu_fini_microcode(smu);
   1086
   1087	return 0;
   1088}
   1089
   1090static int smu_get_thermal_temperature_range(struct smu_context *smu)
   1091{
   1092	struct amdgpu_device *adev = smu->adev;
   1093	struct smu_temperature_range *range =
   1094				&smu->thermal_range;
   1095	int ret = 0;
   1096
   1097	if (!smu->ppt_funcs->get_thermal_temperature_range)
   1098		return 0;
   1099
   1100	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
   1101	if (ret)
   1102		return ret;
   1103
   1104	adev->pm.dpm.thermal.min_temp = range->min;
   1105	adev->pm.dpm.thermal.max_temp = range->max;
   1106	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
   1107	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
   1108	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
   1109	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
   1110	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
   1111	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
   1112	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
   1113
   1114	return ret;
   1115}
   1116
   1117static int smu_smc_hw_setup(struct smu_context *smu)
   1118{
   1119	struct smu_feature *feature = &smu->smu_feature;
   1120	struct amdgpu_device *adev = smu->adev;
   1121	uint32_t pcie_gen = 0, pcie_width = 0;
   1122	uint64_t features_supported;
   1123	int ret = 0;
   1124
   1125	if (adev->in_suspend && smu_is_dpm_running(smu)) {
   1126		dev_info(adev->dev, "dpm has been enabled\n");
   1127		/* these ASICs specifically need system features re-enabled here */
   1128		switch (adev->ip_versions[MP1_HWIP][0]) {
   1129		case IP_VERSION(11, 0, 7):
   1130		case IP_VERSION(11, 0, 11):
   1131		case IP_VERSION(11, 5, 0):
   1132		case IP_VERSION(11, 0, 12):
   1133			ret = smu_system_features_control(smu, true);
   1134			if (ret)
   1135				dev_err(adev->dev, "Failed system features control!\n");
   1136			break;
   1137		default:
   1138			break;
   1139		}
   1140		return ret;
   1141	}
   1142
   1143	ret = smu_init_display_count(smu, 0);
   1144	if (ret) {
   1145		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
   1146		return ret;
   1147	}
   1148
   1149	ret = smu_set_driver_table_location(smu);
   1150	if (ret) {
   1151		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
   1152		return ret;
   1153	}
   1154
   1155	/*
   1156	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
   1157	 */
   1158	ret = smu_set_tool_table_location(smu);
   1159	if (ret) {
   1160		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
   1161		return ret;
   1162	}
   1163
   1164	/*
   1165	 * The messages SetSystemVirtualDramAddr and DramLogSetDramAddr notify
   1166	 * the SMC of the memory pool location.
   1167	 */
   1168	ret = smu_notify_memory_pool_location(smu);
   1169	if (ret) {
   1170		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
   1171		return ret;
   1172	}
   1173
   1174	ret = smu_setup_pptable(smu);
   1175	if (ret) {
   1176		dev_err(adev->dev, "Failed to setup pptable!\n");
   1177		return ret;
   1178	}
   1179
   1180	/* smu_dump_pptable(smu); */
   1181
   1182	/*
   1183	 * With SCPM enabled, PSP is responsible for the PPTable transferring
   1184	 * (to SMU). Driver involvement is neither needed nor permitted.
   1185	 */
   1186	if (!adev->scpm_enabled) {
   1187		/*
   1188		 * Copy pptable bo in the vram to smc with SMU MSGs such as
   1189		 * SetDriverDramAddr and TransferTableDram2Smu.
   1190		 */
   1191		ret = smu_write_pptable(smu);
   1192		if (ret) {
   1193			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
   1194			return ret;
   1195		}
   1196	}
   1197
   1198	/* issue Run*Btc msg */
   1199	ret = smu_run_btc(smu);
   1200	if (ret)
   1201		return ret;
   1202
   1203	/*
   1204	 * With SCPM enabled, these actions (and the relevant messages) are
   1205	 * neither needed nor permitted.
   1206	 */
   1207	if (!adev->scpm_enabled) {
   1208		ret = smu_feature_set_allowed_mask(smu);
   1209		if (ret) {
   1210			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
   1211			return ret;
   1212		}
   1213	}
   1214
   1215	ret = smu_system_features_control(smu, true);
   1216	if (ret) {
   1217		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
   1218		return ret;
   1219	}
   1220
   1221	ret = smu_feature_get_enabled_mask(smu, &features_supported);
   1222	if (ret) {
   1223		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
   1224		return ret;
   1225	}
   1226	bitmap_copy(feature->supported,
   1227		    (unsigned long *)&features_supported,
   1228		    feature->feature_num);
   1229
   1230	if (!smu_is_dpm_running(smu))
   1231		dev_info(adev->dev, "dpm has been disabled\n");
   1232
   1233	/*
   1234	 * Set initial values (from vbios) in the dpm tables context, such as
   1235	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
   1236	 * type of clock.
   1237	 */
   1238	ret = smu_set_default_dpm_table(smu);
   1239	if (ret) {
   1240		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
   1241		return ret;
   1242	}
   1243
   1244	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
   1245		pcie_gen = 3;
   1246	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
   1247		pcie_gen = 2;
   1248	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
   1249		pcie_gen = 1;
   1250	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
   1251		pcie_gen = 0;
   1252
   1253	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
   1254	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
   1255	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
   1256	 */
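       	/* e.g. under this layout, GEN4 x16 would be encoded as
       	 * (pcie_gen << 8) | pcie_width = (3 << 8) | 6 = 0x0306 */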
   1257	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
   1258		pcie_width = 6;
   1259	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
   1260		pcie_width = 5;
   1261	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
   1262		pcie_width = 4;
   1263	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
   1264		pcie_width = 3;
   1265	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
   1266		pcie_width = 2;
   1267	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
   1268		pcie_width = 1;
   1269	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
   1270	if (ret) {
   1271		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
   1272		return ret;
   1273	}
   1274
   1275	ret = smu_get_thermal_temperature_range(smu);
   1276	if (ret) {
   1277		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
   1278		return ret;
   1279	}
   1280
   1281	ret = smu_enable_thermal_alert(smu);
   1282	if (ret) {
   1283		dev_err(adev->dev, "Failed to enable thermal alert!\n");
   1284		return ret;
   1285	}
   1286
   1287	ret = smu_notify_display_change(smu);
   1288	if (ret) {
   1289		dev_err(adev->dev, "Failed to notify display change!\n");
   1290		return ret;
   1291	}
   1292
   1293	/*
   1294	 * Set min deep sleep dce fclk with bootup value from vbios via
   1295	 * SetMinDeepSleepDcefclk MSG.
   1296	 */
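       	/* boot_values.dcefclk is stored in 10 kHz units; dividing by 100
       	 * yields the MHz value the message expects */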
   1297	ret = smu_set_min_dcef_deep_sleep(smu,
   1298					  smu->smu_table.boot_values.dcefclk / 100);
   1299
   1300	return ret;
   1301}
   1302
   1303static int smu_start_smc_engine(struct smu_context *smu)
   1304{
   1305	struct amdgpu_device *adev = smu->adev;
   1306	int ret = 0;
   1307
   1308	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
   1309		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
   1310			if (smu->ppt_funcs->load_microcode) {
   1311				ret = smu->ppt_funcs->load_microcode(smu);
   1312				if (ret)
   1313					return ret;
   1314			}
   1315		}
   1316	}
   1317
   1318	if (smu->ppt_funcs->check_fw_status) {
   1319		ret = smu->ppt_funcs->check_fw_status(smu);
   1320		if (ret) {
   1321			dev_err(adev->dev, "SMC is not ready\n");
   1322			return ret;
   1323		}
   1324	}
   1325
   1326	/*
   1327	 * Send the GetDriverIfVersion message to check whether the returned
   1328	 * value matches the DRIVER_IF_VERSION in the SMC header.
   1329	 */
   1330	ret = smu_check_fw_version(smu);
   1331	if (ret)
   1332		return ret;
   1333
   1334	return ret;
   1335}
   1336
   1337static int smu_hw_init(void *handle)
   1338{
   1339	int ret;
   1340	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1341	struct smu_context *smu = adev->powerplay.pp_handle;
   1342
   1343	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
   1344		smu->pm_enabled = false;
   1345		return 0;
   1346	}
   1347
   1348	ret = smu_start_smc_engine(smu);
   1349	if (ret) {
   1350		dev_err(adev->dev, "SMC engine is not correctly up!\n");
   1351		return ret;
   1352	}
   1353
   1354	if (smu->is_apu) {
   1355		smu_dpm_set_vcn_enable(smu, true);
   1356		smu_dpm_set_jpeg_enable(smu, true);
   1357		smu_set_gfx_cgpg(smu, true);
   1358	}
   1359
   1360	if (!smu->pm_enabled)
   1361		return 0;
   1362
   1363	ret = smu_get_driver_allowed_feature_mask(smu);
   1364	if (ret)
   1365		return ret;
   1366
   1367	ret = smu_smc_hw_setup(smu);
   1368	if (ret) {
   1369		dev_err(adev->dev, "Failed to setup smc hw!\n");
   1370		return ret;
   1371	}
   1372
   1373	/*
   1374	 * Retrieve the maximum sustainable clocks here, considering
   1375	 * 1. It is not needed on resume (from S3).
   1376	 * 2. DAL settings come between .hw_init and .late_init of SMU.
   1377	 *    And DAL needs to know the maximum sustainable clocks. Thus
   1378	 *    it cannot be put in .late_init().
   1379	 */
   1380	ret = smu_init_max_sustainable_clocks(smu);
   1381	if (ret) {
   1382		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
   1383		return ret;
   1384	}
   1385
   1386	adev->pm.dpm_enabled = true;
   1387
   1388	dev_info(adev->dev, "SMU is initialized successfully!\n");
   1389
   1390	return 0;
   1391}
   1392
   1393static int smu_disable_dpms(struct smu_context *smu)
   1394{
   1395	struct amdgpu_device *adev = smu->adev;
   1396	int ret = 0;
   1397	bool use_baco = !smu->is_apu &&
   1398		((amdgpu_in_reset(adev) &&
   1399		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
   1400		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
   1401
   1402	/*
   1403	 * For custom pptable uploading, skip the DPM features
   1404	 * disable process on Navi1x ASICs.
   1405	 *   - The gfx related features are under the control of
   1406	 *     RLC on those ASICs. RLC reinitialization would be
   1407	 *     needed to re-enable them, which would cost much
   1408	 *     more effort.
   1409	 *
   1410	 *   - SMU firmware can handle the DPM reenablement
   1411	 *     properly.
   1412	 */
   1413	if (smu->uploading_custom_pp_table) {
   1414		switch (adev->ip_versions[MP1_HWIP][0]) {
   1415		case IP_VERSION(11, 0, 0):
   1416		case IP_VERSION(11, 0, 5):
   1417		case IP_VERSION(11, 0, 9):
   1418		case IP_VERSION(11, 0, 7):
   1419		case IP_VERSION(11, 0, 11):
   1420		case IP_VERSION(11, 5, 0):
   1421		case IP_VERSION(11, 0, 12):
   1422		case IP_VERSION(11, 0, 13):
   1423			return 0;
   1424		default:
   1425			break;
   1426		}
   1427	}
   1428
   1429	/*
   1430	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
   1431	 * on BACO entry. Driver involvement is unnecessary.
   1432	 */
   1433	if (use_baco) {
   1434		switch (adev->ip_versions[MP1_HWIP][0]) {
   1435		case IP_VERSION(11, 0, 7):
   1436		case IP_VERSION(11, 0, 0):
   1437		case IP_VERSION(11, 0, 5):
   1438		case IP_VERSION(11, 0, 9):
   1439		case IP_VERSION(13, 0, 0):
   1440			return 0;
   1441		default:
   1442			break;
   1443		}
   1444	}
   1445
   1446	/*
   1447	 * For gpu reset, runpm and hibernation through BACO,
   1448	 * BACO feature has to be kept enabled.
   1449	 */
   1450	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
   1451		ret = smu_disable_all_features_with_exception(smu,
   1452							      SMU_FEATURE_BACO_BIT);
   1453		if (ret)
   1454			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
   1455	} else {
   1456		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
   1457		if (!adev->scpm_enabled) {
   1458			ret = smu_system_features_control(smu, false);
   1459			if (ret)
   1460				dev_err(adev->dev, "Failed to disable smu features.\n");
   1461		}
   1462	}
   1463
   1464	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
   1465	    adev->gfx.rlc.funcs->stop)
   1466		adev->gfx.rlc.funcs->stop(adev);
   1467
   1468	return ret;
   1469}
   1470
   1471static int smu_smc_hw_cleanup(struct smu_context *smu)
   1472{
   1473	struct amdgpu_device *adev = smu->adev;
   1474	int ret = 0;
   1475
   1476	cancel_work_sync(&smu->throttling_logging_work);
   1477	cancel_work_sync(&smu->interrupt_work);
   1478
   1479	ret = smu_disable_thermal_alert(smu);
   1480	if (ret) {
   1481		dev_err(adev->dev, "Failed to disable thermal alert!\n");
   1482		return ret;
   1483	}
   1484
   1485	ret = smu_disable_dpms(smu);
   1486	if (ret) {
   1487		dev_err(adev->dev, "Failed to disable dpm features!\n");
   1488		return ret;
   1489	}
   1490
   1491	return 0;
   1492}
   1493
   1494static int smu_hw_fini(void *handle)
   1495{
   1496	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1497	struct smu_context *smu = adev->powerplay.pp_handle;
   1498
   1499	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
   1500		return 0;
   1501
   1502	smu_dpm_set_vcn_enable(smu, false);
   1503	smu_dpm_set_jpeg_enable(smu, false);
   1504
   1505	adev->vcn.cur_state = AMD_PG_STATE_GATE;
   1506	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
   1507
   1508	if (!smu->pm_enabled)
   1509		return 0;
   1510
   1511	adev->pm.dpm_enabled = false;
   1512
   1513	return smu_smc_hw_cleanup(smu);
   1514}
   1515
   1516static void smu_late_fini(void *handle)
   1517{
   1518	struct amdgpu_device *adev = handle;
   1519	struct smu_context *smu = adev->powerplay.pp_handle;
   1520
   1521	kfree(smu);
   1522}
   1523
   1524static int smu_reset(struct smu_context *smu)
   1525{
   1526	struct amdgpu_device *adev = smu->adev;
   1527	int ret;
   1528
   1529	ret = smu_hw_fini(adev);
   1530	if (ret)
   1531		return ret;
   1532
   1533	ret = smu_hw_init(adev);
   1534	if (ret)
   1535		return ret;
   1536
   1537	ret = smu_late_init(adev);
   1538	if (ret)
   1539		return ret;
   1540
   1541	return 0;
   1542}
   1543
   1544static int smu_suspend(void *handle)
   1545{
   1546	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1547	struct smu_context *smu = adev->powerplay.pp_handle;
   1548	int ret;
   1549
   1550	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
   1551		return 0;
   1552
   1553	if (!smu->pm_enabled)
   1554		return 0;
   1555
   1556	adev->pm.dpm_enabled = false;
   1557
   1558	ret = smu_smc_hw_cleanup(smu);
   1559	if (ret)
   1560		return ret;
   1561
   1562	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
   1563
   1564	smu_set_gfx_cgpg(smu, false);
   1565
   1566	return 0;
   1567}
   1568
   1569static int smu_resume(void *handle)
   1570{
   1571	int ret;
   1572	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1573	struct smu_context *smu = adev->powerplay.pp_handle;
   1574
   1575	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
   1576		return 0;
   1577
   1578	if (!smu->pm_enabled)
   1579		return 0;
   1580
   1581	dev_info(adev->dev, "SMU is resuming...\n");
   1582
   1583	ret = smu_start_smc_engine(smu);
   1584	if (ret) {
   1585		dev_err(adev->dev, "SMC engine is not correctly up!\n");
   1586		return ret;
   1587	}
   1588
   1589	ret = smu_smc_hw_setup(smu);
   1590	if (ret) {
   1591		dev_err(adev->dev, "Failed to setup smc hw!\n");
   1592		return ret;
   1593	}
   1594
   1595	smu_set_gfx_cgpg(smu, true);
   1596
   1597	smu->disable_uclk_switch = 0;
   1598
   1599	adev->pm.dpm_enabled = true;
   1600
   1601	dev_info(adev->dev, "SMU is resumed successfully!\n");
   1602
   1603	return 0;
   1604}
   1605
   1606static int smu_display_configuration_change(void *handle,
   1607					    const struct amd_pp_display_configuration *display_config)
   1608{
   1609	struct smu_context *smu = handle;
   1610	int index = 0;
   1611	int num_of_active_display = 0;
   1612
   1613	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1614		return -EOPNOTSUPP;
   1615
   1616	if (!display_config)
   1617		return -EINVAL;
   1618
   1619	smu_set_min_dcef_deep_sleep(smu,
   1620				    display_config->min_dcef_deep_sleep_set_clk / 100);
   1621
   1622	for (index = 0; index < display_config->num_path_including_non_display; index++) {
   1623		if (display_config->displays[index].controller_id != 0)
   1624			num_of_active_display++;
   1625	}
   1626
   1627	return 0;
   1628}
   1629
   1630static int smu_set_clockgating_state(void *handle,
   1631				     enum amd_clockgating_state state)
   1632{
   1633	return 0;
   1634}
   1635
   1636static int smu_set_powergating_state(void *handle,
   1637				     enum amd_powergating_state state)
   1638{
   1639	return 0;
   1640}
   1641
   1642static int smu_enable_umd_pstate(void *handle,
   1643		      enum amd_dpm_forced_level *level)
   1644{
   1645	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
   1646					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
   1647					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
   1648					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
   1649
   1650	struct smu_context *smu = (struct smu_context *)handle;
   1651	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1652
   1653	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
   1654		return -EINVAL;
   1655
   1656	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
   1657		/* enter umd pstate, save current level, disable gfx cg */
   1658		if (*level & profile_mode_mask) {
   1659			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
   1660			smu_gpo_control(smu, false);
   1661			smu_gfx_ulv_control(smu, false);
   1662			smu_deep_sleep_control(smu, false);
   1663			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
   1664		}
   1665	} else {
   1666		/* exit umd pstate, restore level, enable gfx cg */
   1667		if (!(*level & profile_mode_mask)) {
   1668			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
   1669				*level = smu_dpm_ctx->saved_dpm_level;
   1670			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
   1671			smu_deep_sleep_control(smu, true);
   1672			smu_gfx_ulv_control(smu, true);
   1673			smu_gpo_control(smu, true);
   1674		}
   1675	}
   1676
   1677	return 0;
   1678}
   1679
   1680static int smu_bump_power_profile_mode(struct smu_context *smu,
   1681					   long *param,
   1682					   uint32_t param_size)
   1683{
   1684	int ret = 0;
   1685
   1686	if (smu->ppt_funcs->set_power_profile_mode)
   1687		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
   1688
   1689	return ret;
   1690}
   1691
   1692static int smu_adjust_power_state_dynamic(struct smu_context *smu,
   1693				   enum amd_dpm_forced_level level,
   1694				   bool skip_display_settings)
   1695{
   1696	int ret = 0;
   1697	int index = 0;
   1698	long workload;
   1699	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1700
   1701	if (!skip_display_settings) {
   1702		ret = smu_display_config_changed(smu);
   1703		if (ret) {
   1704			dev_err(smu->adev->dev, "Failed to change display config!");
   1705			return ret;
   1706		}
   1707	}
   1708
   1709	ret = smu_apply_clocks_adjust_rules(smu);
   1710	if (ret) {
   1711		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
   1712		return ret;
   1713	}
   1714
   1715	if (!skip_display_settings) {
   1716		ret = smu_notify_smc_display_config(smu);
   1717		if (ret) {
   1718			dev_err(smu->adev->dev, "Failed to notify smc display config!");
   1719			return ret;
   1720		}
   1721	}
   1722
   1723	if (smu_dpm_ctx->dpm_level != level) {
   1724		ret = smu_asic_set_performance_level(smu, level);
   1725		if (ret) {
   1726			dev_err(smu->adev->dev, "Failed to set performance level!");
   1727			return ret;
   1728		}
   1729
   1730		/* update the saved copy */
   1731		smu_dpm_ctx->dpm_level = level;
   1732	}
   1733
   1734	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
   1735		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
   1736		index = fls(smu->workload_mask);
   1737		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
   1738		workload = smu->workload_setting[index];
   1739
   1740		if (smu->power_profile_mode != workload)
   1741			smu_bump_power_profile_mode(smu, &workload, 0);
   1742	}
   1743
   1744	return ret;
   1745}
   1746
   1747static int smu_handle_task(struct smu_context *smu,
   1748			   enum amd_dpm_forced_level level,
   1749			   enum amd_pp_task task_id)
   1750{
   1751	int ret = 0;
   1752
   1753	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1754		return -EOPNOTSUPP;
   1755
   1756	switch (task_id) {
   1757	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
   1758		ret = smu_pre_display_config_changed(smu);
   1759		if (ret)
   1760			return ret;
   1761		ret = smu_adjust_power_state_dynamic(smu, level, false);
   1762		break;
   1763	case AMD_PP_TASK_COMPLETE_INIT:
   1764	case AMD_PP_TASK_READJUST_POWER_STATE:
   1765		ret = smu_adjust_power_state_dynamic(smu, level, true);
   1766		break;
   1767	default:
   1768		break;
   1769	}
   1770
   1771	return ret;
   1772}
   1773
   1774static int smu_handle_dpm_task(void *handle,
   1775			       enum amd_pp_task task_id,
   1776			       enum amd_pm_state_type *user_state)
   1777{
   1778	struct smu_context *smu = handle;
   1779	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
   1780
   1781	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
   1783}
   1784
   1785static int smu_switch_power_profile(void *handle,
   1786				    enum PP_SMC_POWER_PROFILE type,
   1787				    bool en)
   1788{
   1789	struct smu_context *smu = handle;
   1790	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1791	long workload;
   1792	uint32_t index;
   1793
   1794	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1795		return -EOPNOTSUPP;
   1796
    1797	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
   1798		return -EINVAL;
   1799
   1800	if (!en) {
   1801		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
   1802		index = fls(smu->workload_mask);
   1803		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
   1804		workload = smu->workload_setting[index];
   1805	} else {
   1806		smu->workload_mask |= (1 << smu->workload_prority[type]);
   1807		index = fls(smu->workload_mask);
   1808		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
   1809		workload = smu->workload_setting[index];
   1810	}
   1811
   1812	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
   1813		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
   1814		smu_bump_power_profile_mode(smu, &workload, 0);
   1815
   1816	return 0;
   1817}
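
Both smu_adjust_power_state_dynamic() above and smu_switch_power_profile() derive the workload to apply from the highest set bit of workload_mask via fls(). A minimal userspace sketch of just that selection step; fls_demo(), the mask value, and the WORKLOAD_POLICY_MAX bound of 7 are illustrative stand-ins, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define WORKLOAD_POLICY_MAX 7	/* assumed bound, mirroring the driver's range check */

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 when the mask is 0. */
static int fls_demo(uint32_t mask)
{
	return mask ? 32 - __builtin_clz(mask) : 0;
}

int main(void)
{
	long workload_setting[WORKLOAD_POLICY_MAX] = { 0, 1, 2, 3, 4, 5, 6 };
	uint32_t workload_mask = (1u << 4) | (1u << 1);	/* two profiles active */
	int index;

	/* Same selection as above: the highest set (highest-priority) bit wins. */
	index = fls_demo(workload_mask);
	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
	printf("selected workload_setting[%d] = %ld\n", index, workload_setting[index]);
	return 0;
}

With the mask above fls_demo() returns 5, so index 4 is chosen; clearing bit 4 via smu_switch_power_profile(..., false) would fall back to bit 1.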
   1818
   1819static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
   1820{
   1821	struct smu_context *smu = handle;
   1822	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1823
   1824	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1825		return -EOPNOTSUPP;
   1826
   1827	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
   1828		return -EINVAL;
   1829
   1830	return smu_dpm_ctx->dpm_level;
   1831}
   1832
   1833static int smu_force_performance_level(void *handle,
   1834				       enum amd_dpm_forced_level level)
   1835{
   1836	struct smu_context *smu = handle;
   1837	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1838	int ret = 0;
   1839
   1840	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1841		return -EOPNOTSUPP;
   1842
   1843	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
   1844		return -EINVAL;
   1845
   1846	ret = smu_enable_umd_pstate(smu, &level);
   1847	if (ret)
   1848		return ret;
   1849
   1850	ret = smu_handle_task(smu, level,
   1851			      AMD_PP_TASK_READJUST_POWER_STATE);
   1852
   1853	/* reset user dpm clock state */
   1854	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
   1855		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
   1856		smu->user_dpm_profile.clk_dependency = 0;
   1857	}
   1858
   1859	return ret;
   1860}
   1861
   1862static int smu_set_display_count(void *handle, uint32_t count)
   1863{
   1864	struct smu_context *smu = handle;
   1865
   1866	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1867		return -EOPNOTSUPP;
   1868
   1869	return smu_init_display_count(smu, count);
   1870}
   1871
   1872static int smu_force_smuclk_levels(struct smu_context *smu,
   1873			 enum smu_clk_type clk_type,
   1874			 uint32_t mask)
   1875{
   1876	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
   1877	int ret = 0;
   1878
   1879	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1880		return -EOPNOTSUPP;
   1881
   1882	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
   1883		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
   1884		return -EINVAL;
   1885	}
   1886
   1887	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
   1888		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
   1889		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
   1890			smu->user_dpm_profile.clk_mask[clk_type] = mask;
   1891			smu_set_user_clk_dependencies(smu, clk_type);
   1892		}
   1893	}
   1894
   1895	return ret;
   1896}
   1897
   1898static int smu_force_ppclk_levels(void *handle,
   1899				  enum pp_clock_type type,
   1900				  uint32_t mask)
   1901{
   1902	struct smu_context *smu = handle;
   1903	enum smu_clk_type clk_type;
   1904
   1905	switch (type) {
   1906	case PP_SCLK:
   1907		clk_type = SMU_SCLK; break;
   1908	case PP_MCLK:
   1909		clk_type = SMU_MCLK; break;
   1910	case PP_PCIE:
   1911		clk_type = SMU_PCIE; break;
   1912	case PP_SOCCLK:
   1913		clk_type = SMU_SOCCLK; break;
   1914	case PP_FCLK:
   1915		clk_type = SMU_FCLK; break;
   1916	case PP_DCEFCLK:
   1917		clk_type = SMU_DCEFCLK; break;
   1918	case PP_VCLK:
   1919		clk_type = SMU_VCLK; break;
   1920	case PP_DCLK:
   1921		clk_type = SMU_DCLK; break;
   1922	case OD_SCLK:
   1923		clk_type = SMU_OD_SCLK; break;
   1924	case OD_MCLK:
   1925		clk_type = SMU_OD_MCLK; break;
   1926	case OD_VDDC_CURVE:
   1927		clk_type = SMU_OD_VDDC_CURVE; break;
   1928	case OD_RANGE:
   1929		clk_type = SMU_OD_RANGE; break;
   1930	default:
   1931		return -EINVAL;
   1932	}
   1933
   1934	return smu_force_smuclk_levels(smu, clk_type, mask);
   1935}
   1936
    1937/*
    1938 * On system suspend or reset, the dpm_enabled flag is
    1939 * cleared, so those SMU services which are then no longer
    1940 * supported get gated.
    1941 * The mp1 state setting, however, must still be allowed
    1942 * even with dpm_enabled cleared.
    1943 */
   1944static int smu_set_mp1_state(void *handle,
   1945			     enum pp_mp1_state mp1_state)
   1946{
   1947	struct smu_context *smu = handle;
   1948	int ret = 0;
   1949
   1950	if (!smu->pm_enabled)
   1951		return -EOPNOTSUPP;
   1952
   1953	if (smu->ppt_funcs &&
   1954	    smu->ppt_funcs->set_mp1_state)
   1955		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
   1956
   1957	return ret;
   1958}
   1959
   1960static int smu_set_df_cstate(void *handle,
   1961			     enum pp_df_cstate state)
   1962{
   1963	struct smu_context *smu = handle;
   1964	int ret = 0;
   1965
   1966	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1967		return -EOPNOTSUPP;
   1968
   1969	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
   1970		return 0;
   1971
   1972	ret = smu->ppt_funcs->set_df_cstate(smu, state);
   1973	if (ret)
   1974		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
   1975
   1976	return ret;
   1977}
   1978
   1979int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
   1980{
   1981	int ret = 0;
   1982
   1983	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1984		return -EOPNOTSUPP;
   1985
   1986	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
   1987		return 0;
   1988
   1989	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
   1990	if (ret)
   1991		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
   1992
   1993	return ret;
   1994}
   1995
   1996int smu_write_watermarks_table(struct smu_context *smu)
   1997{
   1998	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   1999		return -EOPNOTSUPP;
   2000
   2001	return smu_set_watermarks_table(smu, NULL);
   2002}
   2003
   2004static int smu_set_watermarks_for_clock_ranges(void *handle,
   2005					       struct pp_smu_wm_range_sets *clock_ranges)
   2006{
   2007	struct smu_context *smu = handle;
   2008
   2009	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2010		return -EOPNOTSUPP;
   2011
   2012	if (smu->disable_watermark)
   2013		return 0;
   2014
   2015	return smu_set_watermarks_table(smu, clock_ranges);
   2016}
   2017
   2018int smu_set_ac_dc(struct smu_context *smu)
   2019{
   2020	int ret = 0;
   2021
   2022	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2023		return -EOPNOTSUPP;
   2024
   2025	/* controlled by firmware */
   2026	if (smu->dc_controlled_by_gpio)
   2027		return 0;
   2028
   2029	ret = smu_set_power_source(smu,
   2030				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
   2031				   SMU_POWER_SOURCE_DC);
   2032	if (ret)
   2033		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
   2034		       smu->adev->pm.ac_power ? "AC" : "DC");
   2035
   2036	return ret;
   2037}
   2038
   2039const struct amd_ip_funcs smu_ip_funcs = {
   2040	.name = "smu",
   2041	.early_init = smu_early_init,
   2042	.late_init = smu_late_init,
   2043	.sw_init = smu_sw_init,
   2044	.sw_fini = smu_sw_fini,
   2045	.hw_init = smu_hw_init,
   2046	.hw_fini = smu_hw_fini,
   2047	.late_fini = smu_late_fini,
   2048	.suspend = smu_suspend,
   2049	.resume = smu_resume,
   2050	.is_idle = NULL,
   2051	.check_soft_reset = NULL,
   2052	.wait_for_idle = NULL,
   2053	.soft_reset = NULL,
   2054	.set_clockgating_state = smu_set_clockgating_state,
   2055	.set_powergating_state = smu_set_powergating_state,
   2056};
   2057
   2058const struct amdgpu_ip_block_version smu_v11_0_ip_block =
   2059{
   2060	.type = AMD_IP_BLOCK_TYPE_SMC,
   2061	.major = 11,
   2062	.minor = 0,
   2063	.rev = 0,
   2064	.funcs = &smu_ip_funcs,
   2065};
   2066
   2067const struct amdgpu_ip_block_version smu_v12_0_ip_block =
   2068{
   2069	.type = AMD_IP_BLOCK_TYPE_SMC,
   2070	.major = 12,
   2071	.minor = 0,
   2072	.rev = 0,
   2073	.funcs = &smu_ip_funcs,
   2074};
   2075
   2076const struct amdgpu_ip_block_version smu_v13_0_ip_block =
   2077{
   2078	.type = AMD_IP_BLOCK_TYPE_SMC,
   2079	.major = 13,
   2080	.minor = 0,
   2081	.rev = 0,
   2082	.funcs = &smu_ip_funcs,
   2083};
   2084
   2085static int smu_load_microcode(void *handle)
   2086{
   2087	struct smu_context *smu = handle;
   2088	struct amdgpu_device *adev = smu->adev;
   2089	int ret = 0;
   2090
   2091	if (!smu->pm_enabled)
   2092		return -EOPNOTSUPP;
   2093
    2094	/* This should be used for non-PSP loading only */
   2095	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
   2096		return 0;
   2097
   2098	if (smu->ppt_funcs->load_microcode) {
   2099		ret = smu->ppt_funcs->load_microcode(smu);
   2100		if (ret) {
   2101			dev_err(adev->dev, "Load microcode failed\n");
   2102			return ret;
   2103		}
   2104	}
   2105
   2106	if (smu->ppt_funcs->check_fw_status) {
   2107		ret = smu->ppt_funcs->check_fw_status(smu);
   2108		if (ret) {
   2109			dev_err(adev->dev, "SMC is not ready\n");
   2110			return ret;
   2111		}
   2112	}
   2113
   2114	return ret;
   2115}
   2116
   2117static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
   2118{
   2119	int ret = 0;
   2120
   2121	if (smu->ppt_funcs->set_gfx_cgpg)
   2122		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
   2123
   2124	return ret;
   2125}
   2126
   2127static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
   2128{
   2129	struct smu_context *smu = handle;
   2130	int ret = 0;
   2131
   2132	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2133		return -EOPNOTSUPP;
   2134
   2135	if (!smu->ppt_funcs->set_fan_speed_rpm)
   2136		return -EOPNOTSUPP;
   2137
   2138	if (speed == U32_MAX)
   2139		return -EINVAL;
   2140
   2141	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
   2142	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
   2143		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
   2144		smu->user_dpm_profile.fan_speed_rpm = speed;
   2145
    2146		/* Override the custom PWM setting, as the two cannot coexist */
   2147		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
   2148		smu->user_dpm_profile.fan_speed_pwm = 0;
   2149	}
   2150
   2151	return ret;
   2152}
   2153
    2154/**
    2155 * smu_get_power_limit - Request one of the SMU power limits
    2156 *
    2157 * @handle: pointer to smu context
    2158 * @limit: the requested limit is written back to this variable
    2159 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
    2160 * @pp_power_type: &pp_power_type selecting the type of power limit
    2161 *
    2162 * Return: 0 on success, <0 on error
    2163 */
   2164int smu_get_power_limit(void *handle,
   2165			uint32_t *limit,
   2166			enum pp_power_limit_level pp_limit_level,
   2167			enum pp_power_type pp_power_type)
   2168{
   2169	struct smu_context *smu = handle;
   2170	struct amdgpu_device *adev = smu->adev;
   2171	enum smu_ppt_limit_level limit_level;
   2172	uint32_t limit_type;
   2173	int ret = 0;
   2174
   2175	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2176		return -EOPNOTSUPP;
   2177
    2178	switch (pp_power_type) {
   2179	case PP_PWR_TYPE_SUSTAINED:
   2180		limit_type = SMU_DEFAULT_PPT_LIMIT;
   2181		break;
   2182	case PP_PWR_TYPE_FAST:
   2183		limit_type = SMU_FAST_PPT_LIMIT;
   2184		break;
   2185	default:
   2186		return -EOPNOTSUPP;
   2188	}
   2189
    2190	switch (pp_limit_level) {
   2191	case PP_PWR_LIMIT_CURRENT:
   2192		limit_level = SMU_PPT_LIMIT_CURRENT;
   2193		break;
   2194	case PP_PWR_LIMIT_DEFAULT:
   2195		limit_level = SMU_PPT_LIMIT_DEFAULT;
   2196		break;
   2197	case PP_PWR_LIMIT_MAX:
   2198		limit_level = SMU_PPT_LIMIT_MAX;
   2199		break;
   2200	case PP_PWR_LIMIT_MIN:
   2201	default:
   2202		return -EOPNOTSUPP;
   2204	}
   2205
   2206	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
   2207		if (smu->ppt_funcs->get_ppt_limit)
   2208			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
   2209	} else {
   2210		switch (limit_level) {
   2211		case SMU_PPT_LIMIT_CURRENT:
   2212			switch (adev->ip_versions[MP1_HWIP][0]) {
   2213			case IP_VERSION(13, 0, 2):
   2214			case IP_VERSION(11, 0, 7):
   2215			case IP_VERSION(11, 0, 11):
   2216			case IP_VERSION(11, 0, 12):
   2217			case IP_VERSION(11, 0, 13):
   2218				ret = smu_get_asic_power_limits(smu,
   2219								&smu->current_power_limit,
   2220								NULL,
   2221								NULL);
   2222				break;
   2223			default:
   2224				break;
   2225			}
   2226			*limit = smu->current_power_limit;
   2227			break;
   2228		case SMU_PPT_LIMIT_DEFAULT:
   2229			*limit = smu->default_power_limit;
   2230			break;
   2231		case SMU_PPT_LIMIT_MAX:
   2232			*limit = smu->max_power_limit;
   2233			break;
   2234		default:
   2235			break;
   2236		}
   2237	}
   2238
   2239	return ret;
   2240}
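
As a usage sketch of the query above (example_query_sustained_limit() is hypothetical, not part of the driver; it just names the enum pair that selects the currently active sustained limit):

/* Hypothetical caller: fetch the currently active sustained power limit. */
static int example_query_sustained_limit(void *handle, uint32_t *out)
{
	return smu_get_power_limit(handle, out,
				   PP_PWR_LIMIT_CURRENT,
				   PP_PWR_TYPE_SUSTAINED);
}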
   2241
   2242static int smu_set_power_limit(void *handle, uint32_t limit)
   2243{
   2244	struct smu_context *smu = handle;
   2245	uint32_t limit_type = limit >> 24;
   2246	int ret = 0;
   2247
   2248	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2249		return -EOPNOTSUPP;
   2250
    2251	limit &= (1 << 24) - 1;
   2252	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
   2253		if (smu->ppt_funcs->set_power_limit)
   2254			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
   2255
   2256	if (limit > smu->max_power_limit) {
   2257		dev_err(smu->adev->dev,
   2258			"New power limit (%d) is over the max allowed %d\n",
   2259			limit, smu->max_power_limit);
   2260		return -EINVAL;
   2261	}
   2262
   2263	if (!limit)
   2264		limit = smu->current_power_limit;
   2265
   2266	if (smu->ppt_funcs->set_power_limit) {
   2267		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
   2268		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
   2269			smu->user_dpm_profile.power_limit = limit;
   2270	}
   2271
   2272	return ret;
   2273}
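
smu_set_power_limit() above unpacks its argument as a type byte plus a 24-bit value, so callers must pack accordingly. A sketch of that packing (example_pack_power_limit() is illustrative; SMU_FAST_PPT_LIMIT is the fast-PPT selector already used in smu_get_power_limit()):

/* Hypothetical helper: pack a limit type and value the way
 * smu_set_power_limit() unpacks them (type in bits 31:24,
 * value in the low 24 bits). */
static uint32_t example_pack_power_limit(uint32_t type, uint32_t value)
{
	return (type << 24) | (value & ((1u << 24) - 1));
}

For instance, smu_set_power_limit(handle, example_pack_power_limit(SMU_FAST_PPT_LIMIT, 150)) would take the early branch for non-default limit types above; 150 is an arbitrary example value in whatever unit the ASIC expects.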
   2274
   2275static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
   2276{
   2277	int ret = 0;
   2278
   2279	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2280		return -EOPNOTSUPP;
   2281
   2282	if (smu->ppt_funcs->print_clk_levels)
   2283		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
   2284
   2285	return ret;
   2286}
   2287
   2288static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
   2289{
   2290	enum smu_clk_type clk_type;
   2291
   2292	switch (type) {
   2293	case PP_SCLK:
   2294		clk_type = SMU_SCLK; break;
   2295	case PP_MCLK:
   2296		clk_type = SMU_MCLK; break;
   2297	case PP_PCIE:
   2298		clk_type = SMU_PCIE; break;
   2299	case PP_SOCCLK:
   2300		clk_type = SMU_SOCCLK; break;
   2301	case PP_FCLK:
   2302		clk_type = SMU_FCLK; break;
   2303	case PP_DCEFCLK:
   2304		clk_type = SMU_DCEFCLK; break;
   2305	case PP_VCLK:
   2306		clk_type = SMU_VCLK; break;
   2307	case PP_DCLK:
   2308		clk_type = SMU_DCLK; break;
   2309	case OD_SCLK:
   2310		clk_type = SMU_OD_SCLK; break;
   2311	case OD_MCLK:
   2312		clk_type = SMU_OD_MCLK; break;
   2313	case OD_VDDC_CURVE:
   2314		clk_type = SMU_OD_VDDC_CURVE; break;
   2315	case OD_RANGE:
   2316		clk_type = SMU_OD_RANGE; break;
   2317	case OD_VDDGFX_OFFSET:
   2318		clk_type = SMU_OD_VDDGFX_OFFSET; break;
   2319	case OD_CCLK:
   2320		clk_type = SMU_OD_CCLK; break;
   2321	default:
   2322		clk_type = SMU_CLK_COUNT; break;
   2323	}
   2324
   2325	return clk_type;
   2326}
   2327
   2328static int smu_print_ppclk_levels(void *handle,
   2329				  enum pp_clock_type type,
   2330				  char *buf)
   2331{
   2332	struct smu_context *smu = handle;
   2333	enum smu_clk_type clk_type;
   2334
   2335	clk_type = smu_convert_to_smuclk(type);
   2336	if (clk_type == SMU_CLK_COUNT)
   2337		return -EINVAL;
   2338
   2339	return smu_print_smuclk_levels(smu, clk_type, buf);
   2340}
   2341
   2342static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
   2343{
   2344	struct smu_context *smu = handle;
   2345	enum smu_clk_type clk_type;
   2346
   2347	clk_type = smu_convert_to_smuclk(type);
   2348	if (clk_type == SMU_CLK_COUNT)
   2349		return -EINVAL;
   2350
   2351	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2352		return -EOPNOTSUPP;
   2353
   2354	if (!smu->ppt_funcs->emit_clk_levels)
   2355		return -ENOENT;
   2356
   2357	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
   2359}
   2360
   2361static int smu_od_edit_dpm_table(void *handle,
   2362				 enum PP_OD_DPM_TABLE_COMMAND type,
   2363				 long *input, uint32_t size)
   2364{
   2365	struct smu_context *smu = handle;
   2366	int ret = 0;
   2367
   2368	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2369		return -EOPNOTSUPP;
   2370
   2371	if (smu->ppt_funcs->od_edit_dpm_table) {
   2372		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
   2373	}
   2374
   2375	return ret;
   2376}
   2377
   2378static int smu_read_sensor(void *handle,
   2379			   int sensor,
   2380			   void *data,
   2381			   int *size_arg)
   2382{
   2383	struct smu_context *smu = handle;
   2384	struct smu_umd_pstate_table *pstate_table =
   2385				&smu->pstate_table;
   2386	int ret = 0;
   2387	uint32_t *size, size_val;
   2388
   2389	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2390		return -EOPNOTSUPP;
   2391
   2392	if (!data || !size_arg)
   2393		return -EINVAL;
   2394
   2395	size_val = *size_arg;
   2396	size = &size_val;
   2397
   2398	if (smu->ppt_funcs->read_sensor)
   2399		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
   2400			goto unlock;
   2401
   2402	switch (sensor) {
   2403	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
   2404		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
   2405		*size = 4;
   2406		break;
   2407	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
   2408		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
   2409		*size = 4;
   2410		break;
   2411	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
   2412		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
   2413		*size = 8;
   2414		break;
   2415	case AMDGPU_PP_SENSOR_UVD_POWER:
   2416		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
   2417		*size = 4;
   2418		break;
   2419	case AMDGPU_PP_SENSOR_VCE_POWER:
   2420		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
   2421		*size = 4;
   2422		break;
   2423	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
    2424		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
   2425		*size = 4;
   2426		break;
   2427	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
   2428		*(uint32_t *)data = 0;
   2429		*size = 4;
   2430		break;
   2431	default:
   2432		*size = 0;
   2433		ret = -EOPNOTSUPP;
   2434		break;
   2435	}
   2436
   2437unlock:
    2438	/* copy the updated uint32_t size back into the caller's int */
   2439	*size_arg = size_val;
   2440
   2441	return ret;
   2442}
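
A hypothetical caller of the sensor interface above; note the int/uint32_t size round-trip the unlock path performs (example_read_vcn_power_state() is illustrative; AMDGPU_PP_SENSOR_VCN_POWER_STATE is one of the cases handled in the switch):

/* Hypothetical caller: read one 4-byte sensor value. */
static int example_read_vcn_power_state(void *handle, uint32_t *state)
{
	int size = sizeof(*state);	/* in: buffer size; out: bytes written */

	return smu_read_sensor(handle, AMDGPU_PP_SENSOR_VCN_POWER_STATE,
			       state, &size);
}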
   2443
   2444static int smu_get_power_profile_mode(void *handle, char *buf)
   2445{
   2446	struct smu_context *smu = handle;
   2447
   2448	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
   2449	    !smu->ppt_funcs->get_power_profile_mode)
   2450		return -EOPNOTSUPP;
   2451	if (!buf)
   2452		return -EINVAL;
   2453
   2454	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
   2455}
   2456
   2457static int smu_set_power_profile_mode(void *handle,
   2458				      long *param,
   2459				      uint32_t param_size)
   2460{
   2461	struct smu_context *smu = handle;
   2462
   2463	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
   2464	    !smu->ppt_funcs->set_power_profile_mode)
   2465		return -EOPNOTSUPP;
   2466
   2467	return smu_bump_power_profile_mode(smu, param, param_size);
   2468}
    2470
   2471static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
   2472{
   2473	struct smu_context *smu = handle;
   2474
   2475	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2476		return -EOPNOTSUPP;
   2477
   2478	if (!smu->ppt_funcs->get_fan_control_mode)
   2479		return -EOPNOTSUPP;
   2480
   2481	if (!fan_mode)
   2482		return -EINVAL;
   2483
   2484	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
   2485
   2486	return 0;
   2487}
   2488
   2489static int smu_set_fan_control_mode(void *handle, u32 value)
   2490{
   2491	struct smu_context *smu = handle;
   2492	int ret = 0;
   2493
   2494	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2495		return -EOPNOTSUPP;
   2496
   2497	if (!smu->ppt_funcs->set_fan_control_mode)
   2498		return -EOPNOTSUPP;
   2499
   2500	if (value == U32_MAX)
   2501		return -EINVAL;
   2502
   2503	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
   2504	if (ret)
   2505		goto out;
   2506
   2507	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
   2508		smu->user_dpm_profile.fan_mode = value;
   2509
   2510		/* reset user dpm fan speed */
   2511		if (value != AMD_FAN_CTRL_MANUAL) {
   2512			smu->user_dpm_profile.fan_speed_pwm = 0;
   2513			smu->user_dpm_profile.fan_speed_rpm = 0;
   2514			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
   2515		}
   2516	}
   2517
   2518out:
   2519	return ret;
   2520}
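
Together with smu_set_fan_speed_pwm() below, the usual manual-fan sequence is: switch the mode first, then pin the speed, since setting a non-manual mode clears the stored user fan speeds. A sketch (example_force_fan_pwm() is illustrative only):

/* Hypothetical helper: force manual fan control, then pin a PWM value. */
static int example_force_fan_pwm(void *handle, u32 pwm)
{
	int ret = smu_set_fan_control_mode(handle, AMD_FAN_CTRL_MANUAL);

	if (ret)
		return ret;

	return smu_set_fan_speed_pwm(handle, pwm);
}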
   2521
   2522static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
   2523{
   2524	struct smu_context *smu = handle;
   2525	int ret = 0;
   2526
   2527	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2528		return -EOPNOTSUPP;
   2529
   2530	if (!smu->ppt_funcs->get_fan_speed_pwm)
   2531		return -EOPNOTSUPP;
   2532
   2533	if (!speed)
   2534		return -EINVAL;
   2535
   2536	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
   2537
   2538	return ret;
   2539}
   2540
   2541static int smu_set_fan_speed_pwm(void *handle, u32 speed)
   2542{
   2543	struct smu_context *smu = handle;
   2544	int ret = 0;
   2545
   2546	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2547		return -EOPNOTSUPP;
   2548
   2549	if (!smu->ppt_funcs->set_fan_speed_pwm)
   2550		return -EOPNOTSUPP;
   2551
   2552	if (speed == U32_MAX)
   2553		return -EINVAL;
   2554
   2555	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
   2556	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
   2557		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
   2558		smu->user_dpm_profile.fan_speed_pwm = speed;
   2559
    2560		/* Override the custom RPM setting, as the two cannot coexist */
   2561		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
   2562		smu->user_dpm_profile.fan_speed_rpm = 0;
   2563	}
   2564
   2565	return ret;
   2566}
   2567
   2568static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
   2569{
   2570	struct smu_context *smu = handle;
   2571	int ret = 0;
   2572
   2573	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2574		return -EOPNOTSUPP;
   2575
   2576	if (!smu->ppt_funcs->get_fan_speed_rpm)
   2577		return -EOPNOTSUPP;
   2578
   2579	if (!speed)
   2580		return -EINVAL;
   2581
   2582	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
   2583
   2584	return ret;
   2585}
   2586
   2587static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
   2588{
   2589	struct smu_context *smu = handle;
   2590
   2591	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2592		return -EOPNOTSUPP;
   2593
   2594	return smu_set_min_dcef_deep_sleep(smu, clk);
   2595}
   2596
   2597static int smu_get_clock_by_type_with_latency(void *handle,
   2598					      enum amd_pp_clock_type type,
   2599					      struct pp_clock_levels_with_latency *clocks)
   2600{
   2601	struct smu_context *smu = handle;
   2602	enum smu_clk_type clk_type;
   2603	int ret = 0;
   2604
   2605	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2606		return -EOPNOTSUPP;
   2607
   2608	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
   2609		switch (type) {
   2610		case amd_pp_sys_clock:
   2611			clk_type = SMU_GFXCLK;
   2612			break;
   2613		case amd_pp_mem_clock:
   2614			clk_type = SMU_MCLK;
   2615			break;
   2616		case amd_pp_dcef_clock:
   2617			clk_type = SMU_DCEFCLK;
   2618			break;
   2619		case amd_pp_disp_clock:
   2620			clk_type = SMU_DISPCLK;
   2621			break;
   2622		default:
   2623			dev_err(smu->adev->dev, "Invalid clock type!\n");
   2624			return -EINVAL;
   2625		}
   2626
   2627		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
   2628	}
   2629
   2630	return ret;
   2631}
   2632
   2633static int smu_display_clock_voltage_request(void *handle,
   2634					     struct pp_display_clock_request *clock_req)
   2635{
   2636	struct smu_context *smu = handle;
   2637	int ret = 0;
   2638
   2639	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2640		return -EOPNOTSUPP;
   2641
   2642	if (smu->ppt_funcs->display_clock_voltage_request)
   2643		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
   2644
   2645	return ret;
   2646}
    2648
   2649static int smu_display_disable_memory_clock_switch(void *handle,
   2650						   bool disable_memory_clock_switch)
   2651{
   2652	struct smu_context *smu = handle;
   2653	int ret = -EINVAL;
   2654
   2655	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2656		return -EOPNOTSUPP;
   2657
   2658	if (smu->ppt_funcs->display_disable_memory_clock_switch)
   2659		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
   2660
   2661	return ret;
   2662}
   2663
   2664static int smu_set_xgmi_pstate(void *handle,
   2665			       uint32_t pstate)
   2666{
   2667	struct smu_context *smu = handle;
   2668	int ret = 0;
   2669
   2670	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2671		return -EOPNOTSUPP;
   2672
   2673	if (smu->ppt_funcs->set_xgmi_pstate)
   2674		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
   2675
    2676	if (ret)
   2677		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
   2678
   2679	return ret;
   2680}
   2681
   2682static int smu_get_baco_capability(void *handle, bool *cap)
   2683{
   2684	struct smu_context *smu = handle;
   2685
   2686	*cap = false;
   2687
   2688	if (!smu->pm_enabled)
   2689		return 0;
   2690
   2691	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
   2692		*cap = smu->ppt_funcs->baco_is_support(smu);
   2693
   2694	return 0;
   2695}
   2696
   2697static int smu_baco_set_state(void *handle, int state)
   2698{
   2699	struct smu_context *smu = handle;
   2700	int ret = 0;
   2701
   2702	if (!smu->pm_enabled)
   2703		return -EOPNOTSUPP;
   2704
   2705	if (state == 0) {
   2706		if (smu->ppt_funcs->baco_exit)
   2707			ret = smu->ppt_funcs->baco_exit(smu);
   2708	} else if (state == 1) {
   2709		if (smu->ppt_funcs->baco_enter)
   2710			ret = smu->ppt_funcs->baco_enter(smu);
   2711	} else {
   2712		return -EINVAL;
   2713	}
   2714
   2715	if (ret)
   2716		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
    2717				state ? "enter" : "exit");
   2718
   2719	return ret;
   2720}
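
The 0/1 encoding of @state above is easy to misread, so hypothetical wrappers that name it (not part of the driver):

/* Hypothetical wrappers over smu_baco_set_state()'s 0/1 state encoding. */
static int example_baco_enter(void *handle)
{
	return smu_baco_set_state(handle, 1);
}

static int example_baco_exit(void *handle)
{
	return smu_baco_set_state(handle, 0);
}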
   2721
   2722bool smu_mode1_reset_is_support(struct smu_context *smu)
   2723{
   2724	bool ret = false;
   2725
   2726	if (!smu->pm_enabled)
   2727		return false;
   2728
   2729	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
   2730		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
   2731
   2732	return ret;
   2733}
   2734
   2735bool smu_mode2_reset_is_support(struct smu_context *smu)
   2736{
   2737	bool ret = false;
   2738
   2739	if (!smu->pm_enabled)
   2740		return false;
   2741
   2742	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
   2743		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
   2744
   2745	return ret;
   2746}
   2747
   2748int smu_mode1_reset(struct smu_context *smu)
   2749{
   2750	int ret = 0;
   2751
   2752	if (!smu->pm_enabled)
   2753		return -EOPNOTSUPP;
   2754
   2755	if (smu->ppt_funcs->mode1_reset)
   2756		ret = smu->ppt_funcs->mode1_reset(smu);
   2757
   2758	return ret;
   2759}
   2760
   2761static int smu_mode2_reset(void *handle)
   2762{
   2763	struct smu_context *smu = handle;
   2764	int ret = 0;
   2765
   2766	if (!smu->pm_enabled)
   2767		return -EOPNOTSUPP;
   2768
   2769	if (smu->ppt_funcs->mode2_reset)
   2770		ret = smu->ppt_funcs->mode2_reset(smu);
   2771
   2772	if (ret)
   2773		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
   2774
   2775	return ret;
   2776}
   2777
   2778static int smu_get_max_sustainable_clocks_by_dc(void *handle,
   2779						struct pp_smu_nv_clock_table *max_clocks)
   2780{
   2781	struct smu_context *smu = handle;
   2782	int ret = 0;
   2783
   2784	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2785		return -EOPNOTSUPP;
   2786
   2787	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
   2788		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
   2789
   2790	return ret;
   2791}
   2792
   2793static int smu_get_uclk_dpm_states(void *handle,
   2794				   unsigned int *clock_values_in_khz,
   2795				   unsigned int *num_states)
   2796{
   2797	struct smu_context *smu = handle;
   2798	int ret = 0;
   2799
   2800	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2801		return -EOPNOTSUPP;
   2802
   2803	if (smu->ppt_funcs->get_uclk_dpm_states)
   2804		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
   2805
   2806	return ret;
   2807}
   2808
   2809static enum amd_pm_state_type smu_get_current_power_state(void *handle)
   2810{
   2811	struct smu_context *smu = handle;
   2812	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
   2813
   2814	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2815		return -EOPNOTSUPP;
   2816
   2817	if (smu->ppt_funcs->get_current_power_state)
   2818		pm_state = smu->ppt_funcs->get_current_power_state(smu);
   2819
   2820	return pm_state;
   2821}
   2822
   2823static int smu_get_dpm_clock_table(void *handle,
   2824				   struct dpm_clocks *clock_table)
   2825{
   2826	struct smu_context *smu = handle;
   2827	int ret = 0;
   2828
   2829	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2830		return -EOPNOTSUPP;
   2831
   2832	if (smu->ppt_funcs->get_dpm_clock_table)
   2833		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
   2834
   2835	return ret;
   2836}
   2837
   2838static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
   2839{
   2840	struct smu_context *smu = handle;
   2841
   2842	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2843		return -EOPNOTSUPP;
   2844
   2845	if (!smu->ppt_funcs->get_gpu_metrics)
   2846		return -EOPNOTSUPP;
   2847
   2848	return smu->ppt_funcs->get_gpu_metrics(smu, table);
   2849}
   2850
   2851static int smu_enable_mgpu_fan_boost(void *handle)
   2852{
   2853	struct smu_context *smu = handle;
   2854	int ret = 0;
   2855
   2856	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
   2857		return -EOPNOTSUPP;
   2858
   2859	if (smu->ppt_funcs->enable_mgpu_fan_boost)
   2860		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
   2861
   2862	return ret;
   2863}
   2864
   2865static int smu_gfx_state_change_set(void *handle,
   2866				    uint32_t state)
   2867{
   2868	struct smu_context *smu = handle;
   2869	int ret = 0;
   2870
   2871	if (smu->ppt_funcs->gfx_state_change_set)
   2872		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
   2873
   2874	return ret;
   2875}
   2876
   2877int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
   2878{
   2879	int ret = 0;
   2880
   2881	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
   2882		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
   2883
   2884	return ret;
   2885}
   2886
   2887int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
   2888{
   2889	int ret = -EOPNOTSUPP;
   2890
   2891	if (smu->ppt_funcs &&
   2892		smu->ppt_funcs->get_ecc_info)
   2893		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
   2894
   2895	return ret;
   2897}
   2898
   2899static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
   2900{
   2901	struct smu_context *smu = handle;
   2902	struct smu_table_context *smu_table = &smu->smu_table;
   2903	struct smu_table *memory_pool = &smu_table->memory_pool;
   2904
   2905	if (!addr || !size)
   2906		return -EINVAL;
   2907
   2908	*addr = NULL;
   2909	*size = 0;
   2910	if (memory_pool->bo) {
   2911		*addr = memory_pool->cpu_addr;
   2912		*size = memory_pool->size;
   2913	}
   2914
   2915	return 0;
   2916}
   2917
   2918static const struct amd_pm_funcs swsmu_pm_funcs = {
   2919	/* export for sysfs */
   2920	.set_fan_control_mode    = smu_set_fan_control_mode,
   2921	.get_fan_control_mode    = smu_get_fan_control_mode,
   2922	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
   2923	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
   2924	.force_clock_level       = smu_force_ppclk_levels,
   2925	.print_clock_levels      = smu_print_ppclk_levels,
   2926	.emit_clock_levels       = smu_emit_ppclk_levels,
   2927	.force_performance_level = smu_force_performance_level,
   2928	.read_sensor             = smu_read_sensor,
   2929	.get_performance_level   = smu_get_performance_level,
   2930	.get_current_power_state = smu_get_current_power_state,
   2931	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
   2932	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
   2933	.get_pp_num_states       = smu_get_power_num_states,
   2934	.get_pp_table            = smu_sys_get_pp_table,
   2935	.set_pp_table            = smu_sys_set_pp_table,
   2936	.switch_power_profile    = smu_switch_power_profile,
   2937	/* export to amdgpu */
   2938	.dispatch_tasks          = smu_handle_dpm_task,
   2939	.load_firmware           = smu_load_microcode,
   2940	.set_powergating_by_smu  = smu_dpm_set_power_gate,
   2941	.set_power_limit         = smu_set_power_limit,
   2942	.get_power_limit         = smu_get_power_limit,
   2943	.get_power_profile_mode  = smu_get_power_profile_mode,
   2944	.set_power_profile_mode  = smu_set_power_profile_mode,
   2945	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
   2946	.set_mp1_state           = smu_set_mp1_state,
   2947	.gfx_state_change_set    = smu_gfx_state_change_set,
   2948	/* export to DC */
   2949	.get_sclk                         = smu_get_sclk,
   2950	.get_mclk                         = smu_get_mclk,
   2951	.display_configuration_change     = smu_display_configuration_change,
   2952	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
   2953	.display_clock_voltage_request    = smu_display_clock_voltage_request,
   2954	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
   2955	.set_active_display_count         = smu_set_display_count,
   2956	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
   2957	.get_asic_baco_capability         = smu_get_baco_capability,
   2958	.set_asic_baco_state              = smu_baco_set_state,
   2959	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
   2960	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
   2961	.asic_reset_mode_2                = smu_mode2_reset,
   2962	.set_df_cstate                    = smu_set_df_cstate,
   2963	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
   2964	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
   2965	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
   2966	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
   2967	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
   2968	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
   2969	.get_dpm_clock_table              = smu_get_dpm_clock_table,
   2970	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
   2971};
   2972
   2973int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
   2974		       uint64_t event_arg)
   2975{
   2976	int ret = -EINVAL;
   2977
   2978	if (smu->ppt_funcs->wait_for_event)
   2979		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
   2980
   2981	return ret;
   2982}
   2983
   2984int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
   2985{
   2987	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
   2988		return -EOPNOTSUPP;
   2989
   2990	/* Confirm the buffer allocated is of correct size */
   2991	if (size != smu->stb_context.stb_buf_size)
   2992		return -EINVAL;
   2993
   2994	/*
   2995	 * No need to lock smu mutex as we access STB directly through MMIO
   2996	 * and not going through SMU messaging route (for now at least).
    2997	 * For register access we rely on the implementation's internal locking.
   2998	 */
   2999	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
   3000}
   3001
   3002#if defined(CONFIG_DEBUG_FS)
   3003
   3004static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
   3005{
   3006	struct amdgpu_device *adev = filp->f_inode->i_private;
   3007	struct smu_context *smu = adev->powerplay.pp_handle;
   3008	unsigned char *buf;
   3009	int r;
   3010
   3011	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
   3012	if (!buf)
   3013		return -ENOMEM;
   3014
   3015	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
   3016	if (r)
   3017		goto out;
   3018
   3019	filp->private_data = buf;
   3020
   3021	return 0;
   3022
   3023out:
   3024	kvfree(buf);
   3025	return r;
   3026}
   3027
   3028static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
   3029				loff_t *pos)
   3030{
   3031	struct amdgpu_device *adev = filp->f_inode->i_private;
   3032	struct smu_context *smu = adev->powerplay.pp_handle;
    3034
   3035	if (!filp->private_data)
   3036		return -EINVAL;
   3037
   3038	return simple_read_from_buffer(buf,
   3039				       size,
   3040				       pos, filp->private_data,
   3041				       smu->stb_context.stb_buf_size);
   3042}
   3043
   3044static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
   3045{
   3046	kvfree(filp->private_data);
   3047	filp->private_data = NULL;
   3048
   3049	return 0;
   3050}
   3051
    3052/*
    3053 * We have to define not only a read method but also open
    3054 * and release, because .read returns at most PAGE_SIZE of
    3055 * data per call and so is invoked multiple times.
    3056 * We allocate the STB buffer in .open and release it
    3057 * in .release.
    3058 */
   3059static const struct file_operations smu_stb_debugfs_fops = {
   3060	.owner = THIS_MODULE,
   3061	.open = smu_stb_debugfs_open,
   3062	.read = smu_stb_debugfs_read,
   3063	.release = smu_stb_debugfs_release,
   3064	.llseek = default_llseek,
   3065};
   3066
   3067#endif
   3068
   3069void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
   3070{
   3071#if defined(CONFIG_DEBUG_FS)
   3072
   3073	struct smu_context *smu = adev->powerplay.pp_handle;
   3074
   3075	if (!smu || (!smu->stb_context.stb_buf_size))
   3076		return;
   3077
   3078	debugfs_create_file_size("amdgpu_smu_stb_dump",
   3079			    S_IRUSR,
   3080			    adev_to_drm(adev)->primary->debugfs_root,
   3081			    adev,
   3082			    &smu_stb_debugfs_fops,
   3083			    smu->stb_context.stb_buf_size);
   3084#endif
   3085}
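
Once registered, the dump can be read from userspace like any debugfs file. A sketch assuming the default debugfs mount and DRM minor 0 (the path is an assumption, not taken from this file):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_smu_stb_dump", O_RDONLY);

	if (fd < 0)
		return 1;
	/* .read above serves at most PAGE_SIZE per call, so loop. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}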
   3086
   3087int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
   3088{
   3089	int ret = 0;
   3090
   3091	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
   3092		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
   3093
   3094	return ret;
   3095}
   3096
   3097int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
   3098{
   3099	int ret = 0;
   3100
   3101	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
   3102		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
   3103
   3104	return ret;
   3105}