cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

aldebaran_ppt.c (65139B)


      1/*
      2 * Copyright 2019 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#define SWSMU_CODE_LAYER_L2
     25
     26#include <linux/firmware.h>
     27#include "amdgpu.h"
     28#include "amdgpu_dpm.h"
     29#include "amdgpu_smu.h"
     30#include "atomfirmware.h"
     31#include "amdgpu_atomfirmware.h"
     32#include "amdgpu_atombios.h"
     33#include "smu_v13_0.h"
     34#include "smu13_driver_if_aldebaran.h"
     35#include "soc15_common.h"
     36#include "atom.h"
     37#include "aldebaran_ppt.h"
     38#include "smu_v13_0_pptable.h"
     39#include "aldebaran_ppsmc.h"
     40#include "nbio/nbio_7_4_offset.h"
     41#include "nbio/nbio_7_4_sh_mask.h"
     42#include "thm/thm_11_0_2_offset.h"
     43#include "thm/thm_11_0_2_sh_mask.h"
     44#include "amdgpu_xgmi.h"
     45#include <linux/pci.h>
     46#include "amdgpu_ras.h"
     47#include "smu_cmn.h"
     48#include "mp/mp_13_0_2_offset.h"
     49
     50/*
     51 * DO NOT use these for err/warn/info/debug messages.
     52 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
     53 * They are more MGPU friendly.
     54 */
     55#undef pr_err
     56#undef pr_warn
     57#undef pr_info
     58#undef pr_debug
     59
     60#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
     61	[smu_feature] = {1, (aldebaran_feature)}
     62
     63#define FEATURE_MASK(feature) (1ULL << feature)
     64#define SMC_DPM_FEATURE ( \
     65			  FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \
     66			  FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	| \
     67			  FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	| \
     68			  FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	| \
     69			  FEATURE_MASK(FEATURE_DPM_FCLK_BIT)	| \
     70			  FEATURE_MASK(FEATURE_DPM_LCLK_BIT)	| \
     71			  FEATURE_MASK(FEATURE_DPM_XGMI_BIT)	| \
     72			  FEATURE_MASK(FEATURE_DPM_VCN_BIT))
     73
     74/* possible frequency drift (1Mhz) */
     75#define EPSILON				1
     76
     77#define smnPCIE_ESM_CTRL			0x111003D0
     78
/*
 * SMU supports ECCTABLE since version 68.42.0,
 * use this to check whether the ECCTABLE feature is supported.
 */
     83#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
     84
/*
 * SMU supports the BAD CHANNEL info message since version 68.51.00,
 * use this to check whether the bad-channel-info feature is supported.
 */
     89#define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300
     90
/*
 * Thermal limit presets (millidegrees C): entry 0 spans the full sensor
 * range up to 99C, entry 1 pins every limit at 120C.
 */
static const struct smu_temperature_range smu13_thermal_policy[] =
{
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
     96
/*
 * Driver SMU_MSG_* -> ASIC PPSMC_MSG_* mapping. The trailing numeric flag
 * is the extra MSG_MAP attribute (presumably the valid-in-VF flag used by
 * smu_cmn — confirm against smu_cmn.h).
 */
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		0),
	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		     PPSMC_MSG_TransferTableSmu2Dram,		1),
	MSG_MAP(TransferTableDram2Smu,		     PPSMC_MSG_TransferTableDram2Smu,		0),
	MSG_MAP(UseDefaultPPTable,		     PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	     PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	     PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		     PPSMC_MSG_SetHardMinByFreq,		0),
	MSG_MAP(SetHardMaxByFreq,		     PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			0),
	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			0),
	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetWorkloadMask,		     PPSMC_MSG_SetWorkloadMask,			1),
	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm,			0),
	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(PrepareMp1ForUnload,		     PPSMC_MSG_PrepareMp1ForUnload,		0),
	/* note: driver-side GfxDeviceDriverReset maps to PPSMC GfxDriverReset */
	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
	MSG_MAP(RunDcBtc,			     PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(WaflTest,			     PPSMC_MSG_WaflTest,			0),
	MSG_MAP(SetMemoryChannelEnable,		     PPSMC_MSG_SetMemoryChannelEnable,		0),
	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
	MSG_MAP(SetExecuteDMATest,		     PPSMC_MSG_SetExecuteDMATest,		0),
	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
	MSG_MAP(SetUclkDpmMode,			     PPSMC_MSG_SetUclkDpmMode,			0),
	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
	MSG_MAP(BoardPowerCalibration,		     PPSMC_MSG_BoardPowerCalibration,		0),
	MSG_MAP(HeavySBR,                            PPSMC_MSG_HeavySBR,                        0),
	MSG_MAP(SetBadHBMPagesRetiredFlagsPerChannel,	PPSMC_MSG_SetBadHBMPagesRetiredFlagsPerChannel,	0),
};
    151
/*
 * Driver SMU_CLK_* -> ASIC PPCLK_* mapping. SCLK aliases GFXCLK and
 * MCLK aliases UCLK, so either name reaches the same hardware clock.
 */
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(LCLK, 	PPCLK_LCLK),
};
    163
/* Generic SMU_FEATURE_* bit -> Aldebaran FEATURE_* bit mapping. */
static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, 		FEATURE_DATA_CALCULATIONS),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, 			FEATURE_DPM_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, 			FEATURE_DPM_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, 			FEATURE_DPM_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, 			FEATURE_DPM_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, 			FEATURE_DPM_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, 				FEATURE_DPM_XGMI_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, 			FEATURE_DS_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, 			FEATURE_DS_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, 				FEATURE_DS_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, 				FEATURE_DS_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT,				FEATURE_DS_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, 				FEATURE_GFX_SS_BIT),
	/* generic VCN_DPM maps onto the ASIC's DPM_VCN feature */
	ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, 				FEATURE_DPM_VCN_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, 			FEATURE_RSMU_SMN_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, 				FEATURE_WAFL_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, 					FEATURE_PPT_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT, 					FEATURE_TDC_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT, 			FEATURE_APCC_PLUS_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, 			FEATURE_APCC_DFLL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT, 				FEATURE_FUSE_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, 				FEATURE_MP1_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT, 			FEATURE_SMUIO_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT, 				FEATURE_THM_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT, 				FEATURE_CLK_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, 				FEATURE_FW_CTF_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, 				FEATURE_THERMAL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, 	FEATURE_OUT_OF_BAND_MONITOR_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, 			FEATURE_DF_CSTATE),
};
    196
/* Driver SMU_TABLE_* -> ASIC table-id mapping (TAB_MAP uses the same name). */
static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
};
    207
/*
 * ASIC THROTTLER_* status bit -> generic SMU_THROTTLER_* bit, used to
 * translate the firmware throttler status for userspace reporting.
 */
static const uint8_t aldebaran_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TDC_HBM_BIT]		= (SMU_THROTTLER_TDC_MEM_BIT),
	[THROTTLER_TEMP_GPU_BIT]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_APCC_BIT]		= (SMU_THROTTLER_APCC_BIT),
};
    221
    222static int aldebaran_tables_init(struct smu_context *smu)
    223{
    224	struct smu_table_context *smu_table = &smu->smu_table;
    225	struct smu_table *tables = smu_table->tables;
    226
    227	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
    228		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    229
    230	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
    231		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    232
    233	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
    234		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    235
    236	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
    237		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    238
    239	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
    240		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    241
    242	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
    243	if (!smu_table->metrics_table)
    244		return -ENOMEM;
    245	smu_table->metrics_time = 0;
    246
    247	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
    248	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
    249	if (!smu_table->gpu_metrics_table) {
    250		kfree(smu_table->metrics_table);
    251		return -ENOMEM;
    252	}
    253
    254	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
    255	if (!smu_table->ecc_table)
    256		return -ENOMEM;
    257
    258	return 0;
    259}
    260
    261static int aldebaran_allocate_dpm_context(struct smu_context *smu)
    262{
    263	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
    264
    265	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
    266				       GFP_KERNEL);
    267	if (!smu_dpm->dpm_context)
    268		return -ENOMEM;
    269	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
    270
    271	return 0;
    272}
    273
    274static int aldebaran_init_smc_tables(struct smu_context *smu)
    275{
    276	int ret = 0;
    277
    278	ret = aldebaran_tables_init(smu);
    279	if (ret)
    280		return ret;
    281
    282	ret = aldebaran_allocate_dpm_context(smu);
    283	if (ret)
    284		return ret;
    285
    286	return smu_v13_0_init_smc_tables(smu);
    287}
    288
    289static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
    290					      uint32_t *feature_mask, uint32_t num)
    291{
    292	if (num > 2)
    293		return -EINVAL;
    294
    295	/* pptable will handle the features to enable */
    296	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
    297
    298	return 0;
    299}
    300
    301static int aldebaran_set_default_dpm_table(struct smu_context *smu)
    302{
    303	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    304	struct smu_13_0_dpm_table *dpm_table = NULL;
    305	PPTable_t *pptable = smu->smu_table.driver_pptable;
    306	int ret = 0;
    307
    308	/* socclk dpm table setup */
    309	dpm_table = &dpm_context->dpm_tables.soc_table;
    310	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
    311		ret = smu_v13_0_set_single_dpm_table(smu,
    312						     SMU_SOCCLK,
    313						     dpm_table);
    314		if (ret)
    315			return ret;
    316	} else {
    317		dpm_table->count = 1;
    318		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
    319		dpm_table->dpm_levels[0].enabled = true;
    320		dpm_table->min = dpm_table->dpm_levels[0].value;
    321		dpm_table->max = dpm_table->dpm_levels[0].value;
    322	}
    323
    324	/* gfxclk dpm table setup */
    325	dpm_table = &dpm_context->dpm_tables.gfx_table;
    326	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
    327		/* in the case of gfxclk, only fine-grained dpm is honored */
    328		dpm_table->count = 2;
    329		dpm_table->dpm_levels[0].value = pptable->GfxclkFmin;
    330		dpm_table->dpm_levels[0].enabled = true;
    331		dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
    332		dpm_table->dpm_levels[1].enabled = true;
    333		dpm_table->min = dpm_table->dpm_levels[0].value;
    334		dpm_table->max = dpm_table->dpm_levels[1].value;
    335	} else {
    336		dpm_table->count = 1;
    337		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
    338		dpm_table->dpm_levels[0].enabled = true;
    339		dpm_table->min = dpm_table->dpm_levels[0].value;
    340		dpm_table->max = dpm_table->dpm_levels[0].value;
    341	}
    342
    343	/* memclk dpm table setup */
    344	dpm_table = &dpm_context->dpm_tables.uclk_table;
    345	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
    346		ret = smu_v13_0_set_single_dpm_table(smu,
    347						     SMU_UCLK,
    348						     dpm_table);
    349		if (ret)
    350			return ret;
    351	} else {
    352		dpm_table->count = 1;
    353		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
    354		dpm_table->dpm_levels[0].enabled = true;
    355		dpm_table->min = dpm_table->dpm_levels[0].value;
    356		dpm_table->max = dpm_table->dpm_levels[0].value;
    357	}
    358
    359	/* fclk dpm table setup */
    360	dpm_table = &dpm_context->dpm_tables.fclk_table;
    361	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
    362		ret = smu_v13_0_set_single_dpm_table(smu,
    363						     SMU_FCLK,
    364						     dpm_table);
    365		if (ret)
    366			return ret;
    367	} else {
    368		dpm_table->count = 1;
    369		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
    370		dpm_table->dpm_levels[0].enabled = true;
    371		dpm_table->min = dpm_table->dpm_levels[0].value;
    372		dpm_table->max = dpm_table->dpm_levels[0].value;
    373	}
    374
    375	return 0;
    376}
    377
    378static int aldebaran_check_powerplay_table(struct smu_context *smu)
    379{
    380	struct smu_table_context *table_context = &smu->smu_table;
    381	struct smu_13_0_powerplay_table *powerplay_table =
    382		table_context->power_play_table;
    383
    384	table_context->thermal_controller_type =
    385		powerplay_table->thermal_controller_type;
    386
    387	return 0;
    388}
    389
    390static int aldebaran_store_powerplay_table(struct smu_context *smu)
    391{
    392	struct smu_table_context *table_context = &smu->smu_table;
    393	struct smu_13_0_powerplay_table *powerplay_table =
    394		table_context->power_play_table;
    395	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
    396	       sizeof(PPTable_t));
    397
    398	return 0;
    399}
    400
/*
 * Overlay board-specific DPM parameters from the VBIOS smc_dpm_info data
 * table onto the cached driver pptable. Only the v4.10 layout is copied;
 * other revisions are accepted but ignored.
 */
static int aldebaran_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_10 *smc_dpm_table;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					   smc_dpm_info);

	/* locate the smc_dpm_info data table in the VBIOS */
	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	/* only revision 4.10 matches atom_smc_dpm_info_v4_10; copy the board
	 * parameters from GfxMaxCurrent up to (not including) 'reserved' */
	if ((smc_dpm_table->table_header.format_revision == 4) &&
	    (smc_dpm_table->table_header.content_revision == 10))
		smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
				    smc_dpm_table, GfxMaxCurrent);
	return 0;
}
    426
    427static int aldebaran_setup_pptable(struct smu_context *smu)
    428{
    429	int ret = 0;
    430
    431	/* VBIOS pptable is the first choice */
    432	smu->smu_table.boot_values.pp_table_id = 0;
    433
    434	ret = smu_v13_0_setup_pptable(smu);
    435	if (ret)
    436		return ret;
    437
    438	ret = aldebaran_store_powerplay_table(smu);
    439	if (ret)
    440		return ret;
    441
    442	ret = aldebaran_append_powerplay_table(smu);
    443	if (ret)
    444		return ret;
    445
    446	ret = aldebaran_check_powerplay_table(smu);
    447	if (ret)
    448		return ret;
    449
    450	return ret;
    451}
    452
    453static bool aldebaran_is_primary(struct smu_context *smu)
    454{
    455	struct amdgpu_device *adev = smu->adev;
    456
    457	if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
    458		return adev->smuio.funcs->get_die_id(adev) == 0;
    459
    460	return true;
    461}
    462
    463static int aldebaran_run_board_btc(struct smu_context *smu)
    464{
    465	u32 smu_version;
    466	int ret;
    467
    468	if (!aldebaran_is_primary(smu))
    469		return 0;
    470
    471	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
    472	if (ret) {
    473		dev_err(smu->adev->dev, "Failed to get smu version!\n");
    474		return ret;
    475	}
    476	if (smu_version <= 0x00441d00)
    477		return 0;
    478
    479	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
    480	if (ret)
    481		dev_err(smu->adev->dev, "Board power calibration failed!\n");
    482
    483	return ret;
    484}
    485
    486static int aldebaran_run_btc(struct smu_context *smu)
    487{
    488	int ret;
    489
    490	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
    491	if (ret)
    492		dev_err(smu->adev->dev, "RunDcBtc failed!\n");
    493	else
    494		ret = aldebaran_run_board_btc(smu);
    495
    496	return ret;
    497}
    498
/*
 * Seed the UMD pstate table from the default dpm tables: min/peak and
 * current min/max for gfxclk, uclk and socclk, plus a "standard" level
 * picked from the fixed ALDEBARAN_UMD_PSTATE_* indices when each table
 * has enough levels, falling back to each domain's minimum otherwise.
 * Always returns 0.
 */
static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	/* use the fixed pstate levels only when every table has them */
	if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}
    547
    548static int aldebaran_get_clk_table(struct smu_context *smu,
    549				   struct pp_clock_levels_with_latency *clocks,
    550				   struct smu_13_0_dpm_table *dpm_table)
    551{
    552	int i, count;
    553
    554	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
    555	clocks->num_levels = count;
    556
    557	for (i = 0; i < count; i++) {
    558		clocks->data[i].clocks_in_khz =
    559			dpm_table->dpm_levels[i].value * 1000;
    560		clocks->data[i].latency_in_us = 0;
    561	}
    562
    563	return 0;
    564}
    565
    566static int aldebaran_freqs_in_same_level(int32_t frequency1,
    567					 int32_t frequency2)
    568{
    569	return (abs(frequency1 - frequency2) <= EPSILON);
    570}
    571
/*
 * Read one metric from the cached SmuMetrics table.
 *
 * Refreshes the cache via smu_cmn_get_metrics_table() (bypass=false, so
 * a sufficiently recent cached copy may be used) and translates @member
 * into the matching SmuMetrics_t field. Unknown members store UINT_MAX
 * in *@value and still return 0; otherwise returns the refresh error.
 */
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
					  MetricsMember_t member,
					  uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* Valid power data is available only from primary die */
		*value = aldebaran_is_primary(smu) ?
				 metrics->AverageSocketPower << 8 :
				 0;
		break;
	/* temperatures are scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES */
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureHBM *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRMEM:
		*value = metrics->TemperatureVrMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_UNIQUE_ID_UPPER32:
		*value = metrics->PublicSerialNumUpper32;
		break;
	case METRICS_UNIQUE_ID_LOWER32:
		*value = metrics->PublicSerialNumLower32;
		break;
	default:
		/* unrecognized member: report a sentinel, not an error */
		*value = UINT_MAX;
		break;
	}

	return ret;
}
    666
/*
 * Get the current frequency of @clk_type from the metrics table.
 *
 * CurrClock[clk_id] is only accurate while the clock's dpm feature is
 * enabled, so for a disabled feature the Average* metric is used instead
 * (fclk has no average metric and always reads CurrClock).
 *
 * Returns 0 on success, -EINVAL for a NULL @value or an unmapped clock.
 */
static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	if (!value)
		return -EINVAL;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		/*
		 * CurrClock[clk_id] can provide accurate
		 *   output only when the dpm feature is enabled.
		 * We can use Average_* for dpm disabled case.
		 *   But this is available for gfxclk/uclk/socclk/vclk/dclk.
		 */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
			member_type = METRICS_CURR_GFXCLK;
		else
			member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
			member_type = METRICS_CURR_UCLK;
		else
			member_type = METRICS_AVERAGE_UCLK;
		break;
	case PPCLK_SOCCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
			member_type = METRICS_CURR_SOCCLK;
		else
			member_type = METRICS_AVERAGE_SOCCLK;
		break;
	/* vclk/dclk follow the VCN power-gating feature, not a dpm bit */
	case PPCLK_VCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_VCLK;
		else
			member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_DCLK;
		else
			member_type = METRICS_AVERAGE_DCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return aldebaran_get_smu_metrics_data(smu,
					      member_type,
					      value);
}
    731
/*
 * aldebaran_print_clk_levels - format the DPM level table of a clock
 * domain into a sysfs buffer.
 * @smu:  SMU context
 * @type: clock domain to print (SMU_SCLK/SMU_MCLK/... or the OD_ variants,
 *        which additionally emit a heading line before the levels)
 * @buf:  sysfs output buffer
 *
 * A "*" marks the level that matches the currently running frequency.
 * Returns the number of bytes written to @buf, or a negative errno when
 * the current frequency or the clock table cannot be queried.
 */
static int aldebaran_print_clk_levels(struct smu_context *smu,
				      enum smu_clk_type type, char *buf)
{
	int i, now, size = 0;
	int ret = 0;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct pp_clock_levels_with_latency clocks;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = NULL;
	uint32_t display_levels;
	uint32_t freq_values[3] = {0};
	uint32_t min_clk, max_clk;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* After a RAS interrupt the SMU cannot be queried reliably. */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	dpm_context = smu_dpm->dpm_context;

	switch (type) {

	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
		fallthrough;
	case SMU_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
			return ret;
		}

		display_levels = clocks.num_levels;

		min_clk = pstate_table->gfxclk_pstate.curr.min;
		max_clk = pstate_table->gfxclk_pstate.curr.max;

		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		/* fine-grained dpm has only 2 levels */
		if (now > min_clk && now < max_clk) {
			/* Show the current frequency as a synthetic middle level. */
			display_levels = clocks.num_levels + 1;
			freq_values[2] = max_clk;
			freq_values[1] = now;
		}

		/*
		 * For DPM disabled case, there will be only one clock level.
		 * And it's safe to assume that is always the current clock.
		 */
		if (display_levels == clocks.num_levels) {
			for (i = 0; i < clocks.num_levels; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
					freq_values[i],
					(clocks.num_levels == 1) ?
						"*" :
						(aldebaran_freqs_in_same_level(
							 freq_values[i], now) ?
							 "*" :
							 ""));
		} else {
			/* Synthetic 3-level view: index 1 is the current clock. */
			for (i = 0; i < display_levels; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
						freq_values[i], i == 1 ? "*" : "");
		}

		break;

	case SMU_OD_MCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
		fallthrough;
	case SMU_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
			return ret;
		}

		/* clocks_in_khz is converted to MHz for display. */
		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					i, clocks.data[i].clocks_in_khz / 1000,
					(clocks.num_levels == 1) ? "*" :
					(aldebaran_freqs_in_same_level(
								       clocks.data[i].clocks_in_khz / 1000,
								       now) ? "*" : ""));
		break;

	case SMU_SOCCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
			return ret;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					i, clocks.data[i].clocks_in_khz / 1000,
					(clocks.num_levels == 1) ? "*" :
					(aldebaran_freqs_in_same_level(
								       clocks.data[i].clocks_in_khz / 1000,
								       now) ? "*" : ""));
		break;

	case SMU_FCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
			return ret;
		}

		/*
		 * NOTE(review): this loop runs to single_dpm_table->count but
		 * indexes clocks.data[i]; if count can exceed clocks.num_levels
		 * this reads past the populated entries — confirm against
		 * aldebaran_get_clk_table's capping behavior.
		 */
		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					i, single_dpm_table->dpm_levels[i].value,
					(clocks.num_levels == 1) ? "*" :
					(aldebaran_freqs_in_same_level(
								       clocks.data[i].clocks_in_khz / 1000,
								       now) ? "*" : ""));
		break;

	case SMU_VCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
			return ret;
		}

		/* Same count-vs-num_levels caveat as the SMU_FCLK case above. */
		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					i, single_dpm_table->dpm_levels[i].value,
					(clocks.num_levels == 1) ? "*" :
					(aldebaran_freqs_in_same_level(
								       clocks.data[i].clocks_in_khz / 1000,
								       now) ? "*" : ""));
		break;

	case SMU_DCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
			return ret;
		}

		/* Same count-vs-num_levels caveat as the SMU_FCLK case above. */
		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					i, single_dpm_table->dpm_levels[i].value,
					(clocks.num_levels == 1) ? "*" :
					(aldebaran_freqs_in_same_level(
								       clocks.data[i].clocks_in_khz / 1000,
								       now) ? "*" : ""));
		break;

	default:
		break;
	}

	return size;
}
    935
    936static int aldebaran_upload_dpm_level(struct smu_context *smu,
    937				      bool max,
    938				      uint32_t feature_mask,
    939				      uint32_t level)
    940{
    941	struct smu_13_0_dpm_context *dpm_context =
    942		smu->smu_dpm.dpm_context;
    943	uint32_t freq;
    944	int ret = 0;
    945
    946	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
    947	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT))) {
    948		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
    949		ret = smu_cmn_send_smc_msg_with_param(smu,
    950						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
    951						      (PPCLK_GFXCLK << 16) | (freq & 0xffff),
    952						      NULL);
    953		if (ret) {
    954			dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",
    955				max ? "max" : "min");
    956			return ret;
    957		}
    958	}
    959
    960	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
    961	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT))) {
    962		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
    963		ret = smu_cmn_send_smc_msg_with_param(smu,
    964						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
    965						      (PPCLK_UCLK << 16) | (freq & 0xffff),
    966						      NULL);
    967		if (ret) {
    968			dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",
    969				max ? "max" : "min");
    970			return ret;
    971		}
    972	}
    973
    974	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
    975	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT))) {
    976		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
    977		ret = smu_cmn_send_smc_msg_with_param(smu,
    978						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
    979						      (PPCLK_SOCCLK << 16) | (freq & 0xffff),
    980						      NULL);
    981		if (ret) {
    982			dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",
    983				max ? "max" : "min");
    984			return ret;
    985		}
    986	}
    987
    988	return ret;
    989}
    990
    991static int aldebaran_force_clk_levels(struct smu_context *smu,
    992				      enum smu_clk_type type, uint32_t mask)
    993{
    994	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    995	struct smu_13_0_dpm_table *single_dpm_table = NULL;
    996	uint32_t soft_min_level, soft_max_level;
    997	int ret = 0;
    998
    999	soft_min_level = mask ? (ffs(mask) - 1) : 0;
   1000	soft_max_level = mask ? (fls(mask) - 1) : 0;
   1001
   1002	switch (type) {
   1003	case SMU_SCLK:
   1004		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
   1005		if (soft_max_level >= single_dpm_table->count) {
   1006			dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",
   1007				soft_max_level, single_dpm_table->count - 1);
   1008			ret = -EINVAL;
   1009			break;
   1010		}
   1011
   1012		ret = aldebaran_upload_dpm_level(smu,
   1013						 false,
   1014						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
   1015						 soft_min_level);
   1016		if (ret) {
   1017			dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n");
   1018			break;
   1019		}
   1020
   1021		ret = aldebaran_upload_dpm_level(smu,
   1022						 true,
   1023						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
   1024						 soft_max_level);
   1025		if (ret)
   1026			dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n");
   1027
   1028		break;
   1029
   1030	case SMU_MCLK:
   1031	case SMU_SOCCLK:
   1032	case SMU_FCLK:
   1033		/*
   1034		 * Should not arrive here since aldebaran does not
   1035		 * support mclk/socclk/fclk softmin/softmax settings
   1036		 */
   1037		ret = -EINVAL;
   1038		break;
   1039
   1040	default:
   1041		break;
   1042	}
   1043
   1044	return ret;
   1045}
   1046
   1047static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
   1048						   struct smu_temperature_range *range)
   1049{
   1050	struct smu_table_context *table_context = &smu->smu_table;
   1051	struct smu_13_0_powerplay_table *powerplay_table =
   1052		table_context->power_play_table;
   1053	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1054
   1055	if (!range)
   1056		return -EINVAL;
   1057
   1058	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
   1059
   1060	range->hotspot_crit_max = pptable->ThotspotLimit *
   1061		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1062	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
   1063		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1064	range->mem_crit_max = pptable->TmemLimit *
   1065		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1066	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
   1067		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1068	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
   1069
   1070	return 0;
   1071}
   1072
   1073static int aldebaran_get_current_activity_percent(struct smu_context *smu,
   1074						  enum amd_pp_sensors sensor,
   1075						  uint32_t *value)
   1076{
   1077	int ret = 0;
   1078
   1079	if (!value)
   1080		return -EINVAL;
   1081
   1082	switch (sensor) {
   1083	case AMDGPU_PP_SENSOR_GPU_LOAD:
   1084		ret = aldebaran_get_smu_metrics_data(smu,
   1085						     METRICS_AVERAGE_GFXACTIVITY,
   1086						     value);
   1087		break;
   1088	case AMDGPU_PP_SENSOR_MEM_LOAD:
   1089		ret = aldebaran_get_smu_metrics_data(smu,
   1090						     METRICS_AVERAGE_MEMACTIVITY,
   1091						     value);
   1092		break;
   1093	default:
   1094		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
   1095		return -EINVAL;
   1096	}
   1097
   1098	return ret;
   1099}
   1100
   1101static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
   1102{
   1103	if (!value)
   1104		return -EINVAL;
   1105
   1106	return aldebaran_get_smu_metrics_data(smu,
   1107					      METRICS_AVERAGE_SOCKETPOWER,
   1108					      value);
   1109}
   1110
   1111static int aldebaran_thermal_get_temperature(struct smu_context *smu,
   1112					     enum amd_pp_sensors sensor,
   1113					     uint32_t *value)
   1114{
   1115	int ret = 0;
   1116
   1117	if (!value)
   1118		return -EINVAL;
   1119
   1120	switch (sensor) {
   1121	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
   1122		ret = aldebaran_get_smu_metrics_data(smu,
   1123						     METRICS_TEMPERATURE_HOTSPOT,
   1124						     value);
   1125		break;
   1126	case AMDGPU_PP_SENSOR_EDGE_TEMP:
   1127		ret = aldebaran_get_smu_metrics_data(smu,
   1128						     METRICS_TEMPERATURE_EDGE,
   1129						     value);
   1130		break;
   1131	case AMDGPU_PP_SENSOR_MEM_TEMP:
   1132		ret = aldebaran_get_smu_metrics_data(smu,
   1133						     METRICS_TEMPERATURE_MEM,
   1134						     value);
   1135		break;
   1136	default:
   1137		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
   1138		return -EINVAL;
   1139	}
   1140
   1141	return ret;
   1142}
   1143
/*
 * aldebaran_read_sensor - pp sensor read entry point; dispatches to the
 * activity/power/temperature/clock helpers and reports the value size.
 * @smu:    SMU context
 * @sensor: which sensor to read
 * @data:   output buffer (all supported sensors write a 4-byte value)
 * @size:   out: number of bytes written to @data
 *
 * Returns 0 on success (and silently when a RAS interrupt has fired),
 * -EINVAL for NULL arguments, -EOPNOTSUPP for unknown sensors, or the
 * underlying helper's error code.
 */
static int aldebaran_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	/* SMU access is unreliable after a RAS interrupt; report success with no data. */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = aldebaran_get_current_activity_percent(smu,
							     sensor,
							     (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = aldebaran_thermal_get_temperature(smu, sensor,
							(uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency in 10K unit */
		/*
		 * NOTE(review): *data is scaled even when the read above
		 * failed, i.e. possibly on an unwritten buffer — confirm
		 * callers ignore @data when ret != 0.
		 */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		/* Same 10KHz-unit scaling (and same caveat) as GFX_MCLK above. */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
   1197
/*
 * aldebaran_get_power_limit - report the current, default, and maximum
 * power (PPT) limits. Any of the three output pointers may be NULL.
 *
 * When the PPT feature is disabled all values are reported as 0. The
 * live limit comes from the SMU (primary die only); the pptable value
 * serves both as a fallback and as the maximum.
 *
 * Returns 0 on success, -EINVAL when the SMU query fails and no pptable
 * is available to fall back on.
 */
static int aldebaran_get_power_limit(struct smu_context *smu,
				     uint32_t *current_power_limit,
				     uint32_t *default_power_limit,
				     uint32_t *max_power_limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit = 0;
	int ret;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		if (current_power_limit)
			*current_power_limit = 0;
		if (default_power_limit)
			*default_power_limit = 0;
		if (max_power_limit)
			*max_power_limit = 0;

		dev_warn(smu->adev->dev,
			"PPT feature is not enabled, power values can't be fetched.");

		return 0;
	}

	/* Valid power data is available only from primary die.
	 * For secondary die show the value as 0.
	 */
	if (aldebaran_is_primary(smu)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
					   &power_limit);

		if (ret) {
			/* the last hope to figure out the ppt limit */
			if (!pptable) {
				dev_err(smu->adev->dev,
					"Cannot get PPT limit due to pptable missing!");
				return -EINVAL;
			}
			power_limit = pptable->PptLimit;
		}
	}

	/* Current and default are reported identically. */
	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	/* Max comes from the pptable; left untouched if no pptable exists. */
	if (max_power_limit) {
		if (pptable)
			*max_power_limit = pptable->PptLimit;
	}

	return 0;
}
   1251
   1252static int aldebaran_set_power_limit(struct smu_context *smu,
   1253				     enum smu_ppt_limit_type limit_type,
   1254				     uint32_t limit)
   1255{
   1256	/* Power limit can be set only through primary die */
   1257	if (aldebaran_is_primary(smu))
   1258		return smu_v13_0_set_power_limit(smu, limit_type, limit);
   1259
   1260	return -EINVAL;
   1261}
   1262
   1263static int aldebaran_system_features_control(struct  smu_context *smu, bool enable)
   1264{
   1265	int ret;
   1266
   1267	ret = smu_v13_0_system_features_control(smu, enable);
   1268	if (!ret && enable)
   1269		ret = aldebaran_run_btc(smu);
   1270
   1271	return ret;
   1272}
   1273
/*
 * aldebaran_set_performance_level - switch the DPM forced level.
 *
 * Leaving determinism mode tears it down in the SMU and restores the
 * default gfx soft-max. Entering determinism itself is a no-op here
 * (the clock range is programmed later via the soft-freq path); all
 * other levels are handled by the generic smu13 implementation.
 */
static int aldebaran_set_performance_level(struct smu_context *smu,
					   enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	/* Disable determinism if switching to another mode */
	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
	}

	switch (level) {

	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
		/* Nothing more to do; frequency range set via soft-freq path. */
		return 0;

	/* All remaining levels fall through to the generic handler. */
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_LOW:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
	default:
		break;
	}

	return smu_v13_0_set_performance_level(smu, level);
}
   1307
/*
 * aldebaran_set_soft_freq_limited_range - program the gfx clock range.
 *
 * Two modes are supported:
 *  - MANUAL: [min, max] is applied directly as the soft frequency range.
 *  - PERF_DETERMINISM: only @max is honored; the full default range is
 *    restored first, then determinism is enabled at @max.
 *
 * Only SMU_GFXCLK/SMU_SCLK are accepted, and only in the two modes above.
 * Returns 0 on success or a negative errno.
 */
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		/* Skip the SMU round-trip when nothing changes. */
		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
							    min, max);
		if (!ret) {
			/* Cache the applied range only on success. */
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		/* The determinism target must lie within the gfx table range. */
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
			(max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(adev->dev,
					"Invalid max frequency %d MHz specified for determinism\n", max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		if (!ret) {
			/* Brief settle delay before enabling determinism. */
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(smu,
					SMU_MSG_EnableDeterminism,
					max, NULL);
			if (ret) {
				dev_err(adev->dev,
						"Failed to enable determinism at GFX clock %d MHz\n", max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}
   1378
/*
 * aldebaran_usr_edit_dpm_table - handle sysfs OD (overdrive) table edits.
 *
 * Supported commands:
 *  - PP_OD_EDIT_SCLK_VDDC_TABLE: input[0] selects min (0) or max (1),
 *    input[1] is the frequency in MHz; stored in the "custom" pstate.
 *  - PP_OD_RESTORE_DEFAULT_TABLE: re-applies the default gfx range.
 *  - PP_OD_COMMIT_DPM_TABLE: applies the staged custom range (missing
 *    endpoints default to the current range).
 *
 * Only allowed in manual or determinism mode. Returns 0 on success,
 * -EINVAL for bad input, -ENOSYS for unknown commands.
 */
static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
							long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			/* Stage a new custom minimum; reject values below the table floor. */
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.min);
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			/* Stage a new custom maximum; reject values above the table ceiling. */
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.max);
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Fall back to the current range for any endpoint never staged. */
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
   1462
   1463static bool aldebaran_is_dpm_running(struct smu_context *smu)
   1464{
   1465	int ret;
   1466	uint64_t feature_enabled;
   1467
   1468	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
   1469	if (ret)
   1470		return false;
   1471	return !!(feature_enabled & SMC_DPM_FEATURE);
   1472}
   1473
/*
 * aldebaran_i2c_xfer - i2c_algorithm master_xfer: pack the message list
 * into a SwI2cRequest_t, hand it to the SMU firmware via the driver
 * table, then copy read data back from the firmware's response.
 * @i2c_adap: adapter registered in aldebaran_i2c_control_init
 * @msg:      messages to transfer
 * @num_msgs: number of messages
 *
 * Returns @num_msgs on success, negative errno on failure. The adapter
 * quirks (combined same-address only) make using msg[0].addr for the
 * whole request safe.
 */
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	/* res aliases the shared driver table; firmware writes results there. */
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/* Flatten all message bytes into one command stream; c is the flat index. */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	/* Synchronous round-trip to the SMU; pm.mutex serializes table access. */
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	/* Copy read bytes out of the response, skipping write-only messages. */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}
   1551
/* i2c_algorithm functionality: plain I2C plus emulated SMBus commands. */
static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
   1556
   1557
/* I2C algorithm backed by the SMU firmware's software-I2C engine. */
static const struct i2c_algorithm aldebaran_i2c_algo = {
	.master_xfer = aldebaran_i2c_xfer,
	.functionality = aldebaran_i2c_func,
};
   1562
/*
 * Adapter limits imposed by the firmware command buffer: transfers are
 * capped at MAX_SW_I2C_COMMANDS bytes, zero-length messages are rejected,
 * and combined transactions must target a single address (which lets
 * aldebaran_i2c_xfer use msg[0].addr for the whole request).
 */
static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len  = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
   1570
   1571static int aldebaran_i2c_control_init(struct smu_context *smu)
   1572{
   1573	struct amdgpu_device *adev = smu->adev;
   1574	struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
   1575	struct i2c_adapter *control = &smu_i2c->adapter;
   1576	int res;
   1577
   1578	smu_i2c->adev = adev;
   1579	smu_i2c->port = 0;
   1580	mutex_init(&smu_i2c->mutex);
   1581	control->owner = THIS_MODULE;
   1582	control->class = I2C_CLASS_SPD;
   1583	control->dev.parent = &adev->pdev->dev;
   1584	control->algo = &aldebaran_i2c_algo;
   1585	snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
   1586	control->quirks = &aldebaran_i2c_control_quirks;
   1587	i2c_set_adapdata(control, smu_i2c);
   1588
   1589	res = i2c_add_adapter(control);
   1590	if (res) {
   1591		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
   1592		goto Out_err;
   1593	}
   1594
   1595	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
   1596	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
   1597
   1598	return 0;
   1599Out_err:
   1600	i2c_del_adapter(control);
   1601
   1602	return res;
   1603}
   1604
/*
 * aldebaran_i2c_control_fini - tear down SMU I2C buses and clear the
 * EEPROM bus pointers.
 *
 * NOTE(review): init registers only bus 0, yet all MAX_SMU_I2C_BUSES
 * adapters are deleted here — relies on i2c_del_adapter() tolerating
 * never-registered adapters; confirm.
 */
static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}
   1619
/*
 * aldebaran_get_unique_id - assemble the 64-bit unique device id from
 * two SMU metrics words and publish it (best effort: a failed read
 * leaves the corresponding half as zero, and the id is stored anyway).
 */
static void aldebaran_get_unique_id(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;

	if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
		goto out;
	if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
		goto out;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	/* Publish as the hex serial only if one wasn't set already. */
	if (adev->serial[0] == '\0')
		sprintf(adev->serial, "%016llx", adev->unique_id);
}
   1635
   1636static bool aldebaran_is_baco_supported(struct smu_context *smu)
   1637{
   1638	/* aldebaran is not support baco */
   1639
   1640	return false;
   1641}
   1642
   1643static int aldebaran_set_df_cstate(struct smu_context *smu,
   1644				   enum pp_df_cstate state)
   1645{
   1646	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
   1647}
   1648
   1649static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
   1650{
   1651	struct amdgpu_device *adev = smu->adev;
   1652
   1653	/* The message only works on master die and NACK will be sent
   1654	   back for other dies, only send it on master die */
   1655	if (!adev->smuio.funcs->get_socket_id(adev) &&
   1656	    !adev->smuio.funcs->get_die_id(adev))
   1657		return smu_cmn_send_smc_msg_with_param(smu,
   1658				   SMU_MSG_GmiPwrDnControl,
   1659				   en ? 0 : 1,
   1660				   NULL);
   1661	else
   1662		return 0;
   1663}
   1664
/* Maps throttler-status bits to human-readable names for the log line. */
static const struct throttling_logging_label {
	uint32_t feature_mask;
	const char *label;
} logging_label[] = {
	{(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
	{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
	{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
	{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
	{(1U << THROTTLER_TEMP_VR_SOC_BIT), "VR of SOC rail"},
};
/*
 * aldebaran_log_thermal_throttling_event - read the throttler status
 * from the SMU metrics, log a warning listing every active throttler
 * ("X and Y and ..."), and forward the event to KFD's SMI interface.
 * Silently returns if the metrics read fails.
 */
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
	int ret;
	int throttler_idx, throtting_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	ret = aldebaran_get_smu_metrics_data(smu,
					     METRICS_THROTTLER_STATUS,
					     &throttler_status);
	if (ret)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	/* Append each active throttler's label, " and "-separated. */
	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
	     throttler_idx++) {
		if (throttler_status & logging_label[throttler_idx].feature_mask) {
			throtting_events++;
			buf_idx += snprintf(log_buf + buf_idx,
					    sizeof(log_buf) - buf_idx,
					    "%s%s",
					    throtting_events > 1 ? " and " : "",
					    logging_label[throttler_idx].label);
			/* snprintf returns the would-be length, so buf_idx can exceed the buffer. */
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
		 log_buf);
	/* Mirror the event to KFD with driver-independent throttler bits. */
	kgd2kfd_smi_event_throttle(smu->adev->kfd.dev,
		smu_cmn_get_indep_throttler_status(throttler_status,
						   aldebaran_throttler_map));
}
   1713
   1714static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
   1715{
   1716	struct amdgpu_device *adev = smu->adev;
   1717	uint32_t esm_ctrl;
   1718
   1719	/* TODO: confirm this on real target */
   1720	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
   1721	if ((esm_ctrl >> 15) & 0x1FFFF)
   1722		return (((esm_ctrl >> 8) & 0x3F) + 128);
   1723
   1724	return smu_v13_0_get_current_pcie_link_speed(smu);
   1725}
   1726
/*
 * Populate the driver-owned gpu_metrics_v1_3 buffer from a fresh copy of
 * the SMU metrics table and return it to the caller through @table.
 *
 * Returns sizeof(struct gpu_metrics_v1_3) on success, or a negative error
 * code if the metrics table could not be fetched.
 */
static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
					 void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int i, ret = 0;

	/* NOTE(review): the 'true' flag appears to force a refresh of the
	 * cached metrics table — confirm against smu_cmn_get_metrics_table. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics,
					true);
	if (ret)
		return ret;

	/* Stamp the output as gpu_metrics format v1.3 */
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
	gpu_metrics->average_mm_activity = 0;

	/* Valid power data is available only from primary die */
	if (aldebaran_is_primary(smu)) {
		gpu_metrics->average_socket_power = metrics.AverageSocketPower;
		/* Reassemble the 64-bit energy accumulator from two 32-bit halves */
		gpu_metrics->energy_accumulator =
			(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
			metrics.EnergyAcc64bitLow;
	} else {
		gpu_metrics->average_socket_power = 0;
		gpu_metrics->energy_accumulator = 0;
	}

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
	/* No VCN activity reporting on this ASIC's metrics table */
	gpu_metrics->average_vclk0_frequency = 0;
	gpu_metrics->average_dclk0_frequency = 0;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	/* Raw ASIC-specific status plus the ASIC-independent translation */
	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   aldebaran_throttler_map);

	/* Passively cooled part: no fan */
	gpu_metrics->current_fan_speed = 0;

	gpu_metrics->pcie_link_width =
		smu_v13_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
		aldebaran_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc;
	gpu_metrics->mem_activity_acc = metrics.DramBusyAcc;

	for (i = 0; i < NUM_HBM_INSTANCES; i++)
		gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i];

	/* Reassemble the 64-bit firmware timestamp from two 32-bit halves */
	gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) |
					metrics.TimeStampLow;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
   1805
   1806static int aldebaran_check_ecc_table_support(struct smu_context *smu)
   1807{
   1808	uint32_t if_version = 0xff, smu_version = 0xff;
   1809	int ret = 0;
   1810
   1811	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
   1812	if (ret) {
   1813		/* return not support if failed get smu_version */
   1814		ret = -EOPNOTSUPP;
   1815	}
   1816
   1817	if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
   1818		ret = -EOPNOTSUPP;
   1819
   1820	return ret;
   1821}
   1822
   1823static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
   1824					 void *table)
   1825{
   1826	struct smu_table_context *smu_table = &smu->smu_table;
   1827	EccInfoTable_t *ecc_table = NULL;
   1828	struct ecc_info_per_ch *ecc_info_per_channel = NULL;
   1829	int i, ret = 0;
   1830	struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
   1831
   1832	ret = aldebaran_check_ecc_table_support(smu);
   1833	if (ret)
   1834		return ret;
   1835
   1836	ret = smu_cmn_update_table(smu,
   1837			       SMU_TABLE_ECCINFO,
   1838			       0,
   1839			       smu_table->ecc_table,
   1840			       false);
   1841	if (ret) {
   1842		dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
   1843		return ret;
   1844	}
   1845
   1846	ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
   1847
   1848	for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
   1849		ecc_info_per_channel = &(eccinfo->ecc[i]);
   1850		ecc_info_per_channel->ce_count_lo_chip =
   1851			ecc_table->EccInfo[i].ce_count_lo_chip;
   1852		ecc_info_per_channel->ce_count_hi_chip =
   1853			ecc_table->EccInfo[i].ce_count_hi_chip;
   1854		ecc_info_per_channel->mca_umc_status =
   1855			ecc_table->EccInfo[i].mca_umc_status;
   1856		ecc_info_per_channel->mca_umc_addr =
   1857			ecc_table->EccInfo[i].mca_umc_addr;
   1858	}
   1859
   1860	return ret;
   1861}
   1862
   1863static int aldebaran_mode1_reset(struct smu_context *smu)
   1864{
   1865	u32 smu_version, fatal_err, param;
   1866	int ret = 0;
   1867	struct amdgpu_device *adev = smu->adev;
   1868	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
   1869
   1870	fatal_err = 0;
   1871	param = SMU_RESET_MODE_1;
   1872
   1873	/*
   1874	* PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
   1875	*/
   1876	smu_cmn_get_smc_version(smu, NULL, &smu_version);
   1877	if (smu_version < 0x00440700) {
   1878		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
   1879	}
   1880	else {
   1881		/* fatal error triggered by ras, PMFW supports the flag
   1882		   from 68.44.0 */
   1883		if ((smu_version >= 0x00442c00) && ras &&
   1884		    atomic_read(&ras->in_recovery))
   1885			fatal_err = 1;
   1886
   1887		param |= (fatal_err << 16);
   1888		ret = smu_cmn_send_smc_msg_with_param(smu,
   1889					SMU_MSG_GfxDeviceDriverReset, param, NULL);
   1890	}
   1891
   1892	if (!ret)
   1893		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
   1894
   1895	return ret;
   1896}
   1897
   1898static int aldebaran_mode2_reset(struct smu_context *smu)
   1899{
   1900	u32 smu_version;
   1901	int ret = 0, index;
   1902	struct amdgpu_device *adev = smu->adev;
   1903	int timeout = 10;
   1904
   1905	smu_cmn_get_smc_version(smu, NULL, &smu_version);
   1906
   1907	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
   1908						SMU_MSG_GfxDeviceDriverReset);
   1909
   1910	mutex_lock(&smu->message_lock);
   1911	if (smu_version >= 0x00441400) {
   1912		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
   1913		/* This is similar to FLR, wait till max FLR timeout */
   1914		msleep(100);
   1915		dev_dbg(smu->adev->dev, "restore config space...\n");
   1916		/* Restore the config space saved during init */
   1917		amdgpu_device_load_pci_state(adev->pdev);
   1918
   1919		dev_dbg(smu->adev->dev, "wait for reset ack\n");
   1920		while (ret == -ETIME && timeout)  {
   1921			ret = smu_cmn_wait_for_response(smu);
   1922			/* Wait a bit more time for getting ACK */
   1923			if (ret == -ETIME) {
   1924				--timeout;
   1925				usleep_range(500, 1000);
   1926				continue;
   1927			}
   1928
   1929			if (ret != 1) {
   1930				dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
   1931						SMU_RESET_MODE_2, ret);
   1932				goto out;
   1933			}
   1934		}
   1935
   1936	} else {
   1937		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
   1938				smu_version);
   1939	}
   1940
   1941	if (ret == 1)
   1942		ret = 0;
   1943out:
   1944	mutex_unlock(&smu->message_lock);
   1945
   1946	return ret;
   1947}
   1948
   1949static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
   1950{
   1951	int ret = 0;
   1952	ret =  smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL);
   1953
   1954	return ret;
   1955}
   1956
/*
 * Report whether mode1 reset can be used.  The runtime checks below
 * (minimum PMFW 68.07 and a live PSP) are compiled out with #if 0,
 * so support is currently reported unconditionally.
 */
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;
	uint32_t val;
	/**
	 * PM FW version support mode1 reset from 68.07
	 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x00440700))
		return false;
	/**
	 * mode1 reset relies on PSP, so we should check if
	 * PSP is alive.
	 */
	val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);

	return val != 0x0;
#endif
	return true;
}
   1979
/* Mode2 (soft) reset is reported as supported unconditionally. */
static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
{
	return true;
}
   1984
   1985static int aldebaran_set_mp1_state(struct smu_context *smu,
   1986				   enum pp_mp1_state mp1_state)
   1987{
   1988	switch (mp1_state) {
   1989	case PP_MP1_STATE_UNLOAD:
   1990		return smu_cmn_set_mp1_state(smu, mp1_state);
   1991	default:
   1992		return 0;
   1993	}
   1994}
   1995
   1996static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
   1997		uint32_t size)
   1998{
   1999	int ret = 0;
   2000
   2001	/* message SMU to update the bad page number on SMUBUS */
   2002	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
   2003	if (ret)
   2004		dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad pages number\n",
   2005				__func__);
   2006
   2007	return ret;
   2008}
   2009
   2010static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
   2011{
   2012	uint32_t if_version = 0xff, smu_version = 0xff;
   2013	int ret = 0;
   2014
   2015	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
   2016	if (ret) {
   2017		/* return not support if failed get smu_version */
   2018		ret = -EOPNOTSUPP;
   2019	}
   2020
   2021	if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
   2022		ret = -EOPNOTSUPP;
   2023
   2024	return ret;
   2025}
   2026
   2027static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
   2028		uint32_t size)
   2029{
   2030	int ret = 0;
   2031
   2032	ret = aldebaran_check_bad_channel_info_support(smu);
   2033	if (ret)
   2034		return ret;
   2035
   2036	/* message SMU to update the bad channel info on SMUBUS */
   2037	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel, size, NULL);
   2038	if (ret)
   2039		dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad channel info\n",
   2040				__func__);
   2041
   2042	return ret;
   2043}
   2044
/*
 * Aldebaran pptable_funcs vtable: Aldebaran-specific callbacks mixed with
 * shared smu_v13_0_* and smu_cmn_* implementations.  Installed on the SMU
 * context by aldebaran_set_ppt_funcs() below.
 */
static const struct pptable_funcs aldebaran_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = aldebaran_set_default_dpm_table,
	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
	.print_clk_levels = aldebaran_print_clk_levels,
	.force_clk_levels = aldebaran_force_clk_levels,
	.read_sensor = aldebaran_read_sensor,
	.set_performance_level = aldebaran_set_performance_level,
	.get_power_limit = aldebaran_get_power_limit,
	.is_dpm_running = aldebaran_is_dpm_running,
	.get_unique_id = aldebaran_get_unique_id,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = aldebaran_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	/* pptable related */
	.setup_pptable = aldebaran_setup_pptable,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = aldebaran_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.set_power_limit = aldebaran_set_power_limit,
	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support= aldebaran_is_baco_supported,
	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
	.set_df_cstate = aldebaran_set_df_cstate,
	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = aldebaran_get_gpu_metrics,
	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
	.mode1_reset = aldebaran_mode1_reset,
	.set_mp1_state = aldebaran_set_mp1_state,
	.mode2_reset = aldebaran_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	.i2c_init = aldebaran_i2c_control_init,
	.i2c_fini = aldebaran_i2c_control_fini,
	.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
	.get_ecc_info = aldebaran_get_ecc_info,
	.send_hbm_bad_channel_flag = aldebaran_send_hbm_bad_channel_flag,
};
   2112
/*
 * Install the Aldebaran implementation on the common SMU context:
 * the pptable vtable plus the ASIC-specific message/clock/feature/table
 * mapping tables used by the smu_cmn_* translation helpers.
 */
void aldebaran_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &aldebaran_ppt_funcs;
	smu->message_map = aldebaran_message_map;
	smu->clock_map = aldebaran_clk_map;
	smu->feature_map = aldebaran_feature_mask_map;
	smu->table_map = aldebaran_table_map;
}