cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

navi10_ppt.c (113744B)


      1/*
      2 * Copyright 2019 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#define SWSMU_CODE_LAYER_L2
     25
     26#include <linux/firmware.h>
     27#include <linux/pci.h>
     28#include <linux/i2c.h>
     29#include "amdgpu.h"
     30#include "amdgpu_dpm.h"
     31#include "amdgpu_smu.h"
     32#include "atomfirmware.h"
     33#include "amdgpu_atomfirmware.h"
     34#include "amdgpu_atombios.h"
     35#include "soc15_common.h"
     36#include "smu_v11_0.h"
     37#include "smu11_driver_if_navi10.h"
     38#include "atom.h"
     39#include "navi10_ppt.h"
     40#include "smu_v11_0_pptable.h"
     41#include "smu_v11_0_ppsmc.h"
     42#include "nbio/nbio_2_3_offset.h"
     43#include "nbio/nbio_2_3_sh_mask.h"
     44#include "thm/thm_11_0_2_offset.h"
     45#include "thm/thm_11_0_2_sh_mask.h"
     46
     47#include "asic_reg/mp/mp_11_0_sh_mask.h"
     48#include "smu_cmn.h"
     49#include "smu_11_0_cdr_table.h"
     50
     51/*
     52 * DO NOT use these for err/warn/info/debug messages.
     53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
     54 * They are more MGPU friendly.
     55 */
     56#undef pr_err
     57#undef pr_warn
     58#undef pr_info
     59#undef pr_debug
     60
     61#define FEATURE_MASK(feature) (1ULL << feature)
     62#define SMC_DPM_FEATURE ( \
     63	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
     64	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	 | \
     65	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT)	 | \
     66	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	 | \
     67	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	 | \
     68	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)	 | \
     69	FEATURE_MASK(FEATURE_DPM_LINK_BIT)	 | \
     70	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
     71
     72#define SMU_11_0_GFX_BUSY_THRESHOLD 15
     73
     74static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
     75	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
     76	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
     77	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
     78	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
     79	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
     80	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
     81	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
     82	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		0),
     83	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	0),
     84	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	0),
     85	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	0),
     86	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
     87	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
     88	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		0),
     89	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
     90	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
     91	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
     92	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
     93	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
     94	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
     95	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
     96	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
     97	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable,		0),
     98	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc,			0),
     99	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
    100	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
    101	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
    102	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		0),
    103	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
    104	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
    105	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
    106	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
    107	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig,	0),
    108	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode,		0),
    109	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh,	0),
    110	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow,		0),
    111	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
    112	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk,	0),
    113	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
    114	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
    115	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch,		0),
    116	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps,			0),
    117	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
    118	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
    119	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
    120	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
    121	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt,		0),
    122	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays,		0),
    123	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
    124	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
    125	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
    126	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
    127	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
    128	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
    129	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData,			0),
    130	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
    131	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset,		0),
    132	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown,	0),
    133	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
    134	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
    135	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
    136	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
    137	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME,		0),
    138	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
    139	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange,	0),
    140	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange,	0),
    141	MSG_MAP(GetVoltageByDpm,		PPSMC_MSG_GetVoltageByDpm,		0),
    142	MSG_MAP(GetVoltageByDpmOverdrive,	PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
    143	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
    144	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
    145	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
    146	MSG_MAP(GET_UMC_FW_WA,			PPSMC_MSG_GetUMCFWWA,			0),
    147};
    148
    149static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
    150	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
    151	CLK_MAP(SCLK,	PPCLK_GFXCLK),
    152	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
    153	CLK_MAP(FCLK, PPCLK_SOCCLK),
    154	CLK_MAP(UCLK, PPCLK_UCLK),
    155	CLK_MAP(MCLK, PPCLK_UCLK),
    156	CLK_MAP(DCLK, PPCLK_DCLK),
    157	CLK_MAP(VCLK, PPCLK_VCLK),
    158	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
    159	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
    160	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
    161	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
    162};
    163
    164static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
    165	FEA_MAP(DPM_PREFETCHER),
    166	FEA_MAP(DPM_GFXCLK),
    167	FEA_MAP(DPM_GFX_PACE),
    168	FEA_MAP(DPM_UCLK),
    169	FEA_MAP(DPM_SOCCLK),
    170	FEA_MAP(DPM_MP0CLK),
    171	FEA_MAP(DPM_LINK),
    172	FEA_MAP(DPM_DCEFCLK),
    173	FEA_MAP(MEM_VDDCI_SCALING),
    174	FEA_MAP(MEM_MVDD_SCALING),
    175	FEA_MAP(DS_GFXCLK),
    176	FEA_MAP(DS_SOCCLK),
    177	FEA_MAP(DS_LCLK),
    178	FEA_MAP(DS_DCEFCLK),
    179	FEA_MAP(DS_UCLK),
    180	FEA_MAP(GFX_ULV),
    181	FEA_MAP(FW_DSTATE),
    182	FEA_MAP(GFXOFF),
    183	FEA_MAP(BACO),
    184	FEA_MAP(VCN_PG),
    185	FEA_MAP(JPEG_PG),
    186	FEA_MAP(USB_PG),
    187	FEA_MAP(RSMU_SMN_CG),
    188	FEA_MAP(PPT),
    189	FEA_MAP(TDC),
    190	FEA_MAP(GFX_EDC),
    191	FEA_MAP(APCC_PLUS),
    192	FEA_MAP(GTHR),
    193	FEA_MAP(ACDC),
    194	FEA_MAP(VR0HOT),
    195	FEA_MAP(VR1HOT),
    196	FEA_MAP(FW_CTF),
    197	FEA_MAP(FAN_CONTROL),
    198	FEA_MAP(THERMAL),
    199	FEA_MAP(GFX_DCS),
    200	FEA_MAP(RM),
    201	FEA_MAP(LED_DISPLAY),
    202	FEA_MAP(GFX_SS),
    203	FEA_MAP(OUT_OF_BAND_MONITOR),
    204	FEA_MAP(TEMP_DEPENDENT_VMIN),
    205	FEA_MAP(MMHUB_PG),
    206	FEA_MAP(ATHUB_PG),
    207	FEA_MAP(APCC_DFLL),
    208};
    209
    210static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
    211	TAB_MAP(PPTABLE),
    212	TAB_MAP(WATERMARKS),
    213	TAB_MAP(AVFS),
    214	TAB_MAP(AVFS_PSM_DEBUG),
    215	TAB_MAP(AVFS_FUSE_OVERRIDE),
    216	TAB_MAP(PMSTATUSLOG),
    217	TAB_MAP(SMU_METRICS),
    218	TAB_MAP(DRIVER_SMU_CONFIG),
    219	TAB_MAP(ACTIVITY_MONITOR_COEFF),
    220	TAB_MAP(OVERDRIVE),
    221	TAB_MAP(I2C_COMMANDS),
    222	TAB_MAP(PACE),
    223};
    224
    225static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
    226	PWR_MAP(AC),
    227	PWR_MAP(DC),
    228};
    229
    230static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
    231	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
    232	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
    233	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
    234	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
    235	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
    236	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
    237	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
    238};
    239
    240static const uint8_t navi1x_throttler_map[] = {
    241	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
    242	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
    243	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
    244	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
    245	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
    246	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
    247	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
    248	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
    249	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
    250	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
    251	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
    252	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
    253	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
    254	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
    255	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
    256	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
    257	[THROTTLER_PPM_BIT]		= (SMU_THROTTLER_PPM_BIT),
    258	[THROTTLER_APCC_BIT]		= (SMU_THROTTLER_APCC_BIT),
    259};
    260
    261
    262static bool is_asic_secure(struct smu_context *smu)
    263{
    264	struct amdgpu_device *adev = smu->adev;
    265	bool is_secure = true;
    266	uint32_t mp0_fw_intf;
    267
    268	mp0_fw_intf = RREG32_PCIE(MP0_Public |
    269				   (smnMP0_FW_INTF & 0xffffffff));
    270
    271	if (!(mp0_fw_intf & (1 << 19)))
    272		is_secure = false;
    273
    274	return is_secure;
    275}
    276
    277static int
    278navi10_get_allowed_feature_mask(struct smu_context *smu,
    279				  uint32_t *feature_mask, uint32_t num)
    280{
    281	struct amdgpu_device *adev = smu->adev;
    282
    283	if (num > 2)
    284		return -EINVAL;
    285
    286	memset(feature_mask, 0, sizeof(uint32_t) * num);
    287
    288	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
    289				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
    290				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
    291				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
    292				| FEATURE_MASK(FEATURE_PPT_BIT)
    293				| FEATURE_MASK(FEATURE_TDC_BIT)
    294				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
    295				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
    296				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
    297				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
    298				| FEATURE_MASK(FEATURE_THERMAL_BIT)
    299				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
    300				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
    301				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
    302				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
    303				| FEATURE_MASK(FEATURE_BACO_BIT)
    304				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
    305				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
    306				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
    307				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
    308
    309	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
    310		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
    311
    312	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
    313		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
    314
    315	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
    316		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
    317
    318	if (adev->pm.pp_feature & PP_ULV_MASK)
    319		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
    320
    321	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
    322		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
    323
    324	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
    325		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
    326
    327	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
    328		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
    329
    330	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
    331		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
    332
    333	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
    334		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
    335
    336	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
    337		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
    338
    339	if (smu->dc_controlled_by_gpio)
    340		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
    341
    342	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
    343		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
    344
    345	/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
    346	if (!(is_asic_secure(smu) &&
    347	     (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
    348	     (adev->rev_id == 0)) &&
    349	    (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
    350		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
    351				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
    352				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
    353
    354	/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
    355	if (is_asic_secure(smu) &&
    356	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
    357	    (adev->rev_id == 0))
    358		*(uint64_t *)feature_mask &=
    359				~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
    360
    361	return 0;
    362}
    363
    364static void navi10_check_bxco_support(struct smu_context *smu)
    365{
    366	struct smu_table_context *table_context = &smu->smu_table;
    367	struct smu_11_0_powerplay_table *powerplay_table =
    368		table_context->power_play_table;
    369	struct smu_baco_context *smu_baco = &smu->smu_baco;
    370	struct amdgpu_device *adev = smu->adev;
    371	uint32_t val;
    372
    373	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
    374	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
    375		val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
    376		smu_baco->platform_support =
    377			(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
    378									false;
    379	}
    380}
    381
    382static int navi10_check_powerplay_table(struct smu_context *smu)
    383{
    384	struct smu_table_context *table_context = &smu->smu_table;
    385	struct smu_11_0_powerplay_table *powerplay_table =
    386		table_context->power_play_table;
    387
    388	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
    389		smu->dc_controlled_by_gpio = true;
    390
    391	navi10_check_bxco_support(smu);
    392
    393	table_context->thermal_controller_type =
    394		powerplay_table->thermal_controller_type;
    395
    396	/*
    397	 * Instead of having its own buffer space and get overdrive_table copied,
    398	 * smu->od_settings just points to the actual overdrive_table
    399	 */
    400	smu->od_settings = &powerplay_table->overdrive_table;
    401
    402	return 0;
    403}
    404
    405static int navi10_append_powerplay_table(struct smu_context *smu)
    406{
    407	struct amdgpu_device *adev = smu->adev;
    408	struct smu_table_context *table_context = &smu->smu_table;
    409	PPTable_t *smc_pptable = table_context->driver_pptable;
    410	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
    411	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
    412	int index, ret;
    413
    414	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
    415					   smc_dpm_info);
    416
    417	ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
    418				      (uint8_t **)&smc_dpm_table);
    419	if (ret)
    420		return ret;
    421
    422	dev_info(adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
    423			smc_dpm_table->table_header.format_revision,
    424			smc_dpm_table->table_header.content_revision);
    425
    426	if (smc_dpm_table->table_header.format_revision != 4) {
    427		dev_err(adev->dev, "smc_dpm_info table format revision is not 4!\n");
    428		return -EINVAL;
    429	}
    430
    431	switch (smc_dpm_table->table_header.content_revision) {
    432	case 5: /* nv10 and nv14 */
    433		smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
    434				    smc_dpm_table, I2cControllers);
    435		break;
    436	case 7: /* nv12 */
    437		ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
    438					      (uint8_t **)&smc_dpm_table_v4_7);
    439		if (ret)
    440			return ret;
    441		smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
    442				    smc_dpm_table_v4_7, I2cControllers);
    443		break;
    444	default:
    445		dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
    446				smc_dpm_table->table_header.content_revision);
    447		return -EINVAL;
    448	}
    449
    450	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
    451		/* TODO: remove it once SMU fw fix it */
    452		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
    453	}
    454
    455	return 0;
    456}
    457
    458static int navi10_store_powerplay_table(struct smu_context *smu)
    459{
    460	struct smu_table_context *table_context = &smu->smu_table;
    461	struct smu_11_0_powerplay_table *powerplay_table =
    462		table_context->power_play_table;
    463
    464	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
    465	       sizeof(PPTable_t));
    466
    467	return 0;
    468}
    469
    470static int navi10_setup_pptable(struct smu_context *smu)
    471{
    472	int ret = 0;
    473
    474	ret = smu_v11_0_setup_pptable(smu);
    475	if (ret)
    476		return ret;
    477
    478	ret = navi10_store_powerplay_table(smu);
    479	if (ret)
    480		return ret;
    481
    482	ret = navi10_append_powerplay_table(smu);
    483	if (ret)
    484		return ret;
    485
    486	ret = navi10_check_powerplay_table(smu);
    487	if (ret)
    488		return ret;
    489
    490	return ret;
    491}
    492
    493static int navi10_tables_init(struct smu_context *smu)
    494{
    495	struct smu_table_context *smu_table = &smu->smu_table;
    496	struct smu_table *tables = smu_table->tables;
    497
    498	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
    499		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    500	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
    501		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    502	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV1X_t),
    503		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    504	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
    505		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    506	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
    507		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    508	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
    509		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    510	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
    511		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
    512		       AMDGPU_GEM_DOMAIN_VRAM);
    513	SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
    514		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    515
    516	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
    517					   GFP_KERNEL);
    518	if (!smu_table->metrics_table)
    519		goto err0_out;
    520	smu_table->metrics_time = 0;
    521
    522	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
    523	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
    524	if (!smu_table->gpu_metrics_table)
    525		goto err1_out;
    526
    527	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
    528	if (!smu_table->watermarks_table)
    529		goto err2_out;
    530
    531	smu_table->driver_smu_config_table =
    532		kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
    533	if (!smu_table->driver_smu_config_table)
    534		goto err3_out;
    535
    536	return 0;
    537
    538err3_out:
    539	kfree(smu_table->watermarks_table);
    540err2_out:
    541	kfree(smu_table->gpu_metrics_table);
    542err1_out:
    543	kfree(smu_table->metrics_table);
    544err0_out:
    545	return -ENOMEM;
    546}
    547
    548static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
    549					      MetricsMember_t member,
    550					      uint32_t *value)
    551{
    552	struct smu_table_context *smu_table = &smu->smu_table;
    553	SmuMetrics_legacy_t *metrics =
    554		(SmuMetrics_legacy_t *)smu_table->metrics_table;
    555	int ret = 0;
    556
    557	ret = smu_cmn_get_metrics_table(smu,
    558					NULL,
    559					false);
    560	if (ret)
    561		return ret;
    562
    563	switch (member) {
    564	case METRICS_CURR_GFXCLK:
    565		*value = metrics->CurrClock[PPCLK_GFXCLK];
    566		break;
    567	case METRICS_CURR_SOCCLK:
    568		*value = metrics->CurrClock[PPCLK_SOCCLK];
    569		break;
    570	case METRICS_CURR_UCLK:
    571		*value = metrics->CurrClock[PPCLK_UCLK];
    572		break;
    573	case METRICS_CURR_VCLK:
    574		*value = metrics->CurrClock[PPCLK_VCLK];
    575		break;
    576	case METRICS_CURR_DCLK:
    577		*value = metrics->CurrClock[PPCLK_DCLK];
    578		break;
    579	case METRICS_CURR_DCEFCLK:
    580		*value = metrics->CurrClock[PPCLK_DCEFCLK];
    581		break;
    582	case METRICS_AVERAGE_GFXCLK:
    583		*value = metrics->AverageGfxclkFrequency;
    584		break;
    585	case METRICS_AVERAGE_SOCCLK:
    586		*value = metrics->AverageSocclkFrequency;
    587		break;
    588	case METRICS_AVERAGE_UCLK:
    589		*value = metrics->AverageUclkFrequency;
    590		break;
    591	case METRICS_AVERAGE_GFXACTIVITY:
    592		*value = metrics->AverageGfxActivity;
    593		break;
    594	case METRICS_AVERAGE_MEMACTIVITY:
    595		*value = metrics->AverageUclkActivity;
    596		break;
    597	case METRICS_AVERAGE_SOCKETPOWER:
    598		*value = metrics->AverageSocketPower << 8;
    599		break;
    600	case METRICS_TEMPERATURE_EDGE:
    601		*value = metrics->TemperatureEdge *
    602			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    603		break;
    604	case METRICS_TEMPERATURE_HOTSPOT:
    605		*value = metrics->TemperatureHotspot *
    606			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    607		break;
    608	case METRICS_TEMPERATURE_MEM:
    609		*value = metrics->TemperatureMem *
    610			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    611		break;
    612	case METRICS_TEMPERATURE_VRGFX:
    613		*value = metrics->TemperatureVrGfx *
    614			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    615		break;
    616	case METRICS_TEMPERATURE_VRSOC:
    617		*value = metrics->TemperatureVrSoc *
    618			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    619		break;
    620	case METRICS_THROTTLER_STATUS:
    621		*value = metrics->ThrottlerStatus;
    622		break;
    623	case METRICS_CURR_FANSPEED:
    624		*value = metrics->CurrFanSpeed;
    625		break;
    626	default:
    627		*value = UINT_MAX;
    628		break;
    629	}
    630
    631	return ret;
    632}
    633
    634static int navi10_get_smu_metrics_data(struct smu_context *smu,
    635				       MetricsMember_t member,
    636				       uint32_t *value)
    637{
    638	struct smu_table_context *smu_table = &smu->smu_table;
    639	SmuMetrics_t *metrics =
    640		(SmuMetrics_t *)smu_table->metrics_table;
    641	int ret = 0;
    642
    643	ret = smu_cmn_get_metrics_table(smu,
    644					NULL,
    645					false);
    646	if (ret)
    647		return ret;
    648
    649	switch (member) {
    650	case METRICS_CURR_GFXCLK:
    651		*value = metrics->CurrClock[PPCLK_GFXCLK];
    652		break;
    653	case METRICS_CURR_SOCCLK:
    654		*value = metrics->CurrClock[PPCLK_SOCCLK];
    655		break;
    656	case METRICS_CURR_UCLK:
    657		*value = metrics->CurrClock[PPCLK_UCLK];
    658		break;
    659	case METRICS_CURR_VCLK:
    660		*value = metrics->CurrClock[PPCLK_VCLK];
    661		break;
    662	case METRICS_CURR_DCLK:
    663		*value = metrics->CurrClock[PPCLK_DCLK];
    664		break;
    665	case METRICS_CURR_DCEFCLK:
    666		*value = metrics->CurrClock[PPCLK_DCEFCLK];
    667		break;
    668	case METRICS_AVERAGE_GFXCLK:
    669		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
    670			*value = metrics->AverageGfxclkFrequencyPreDs;
    671		else
    672			*value = metrics->AverageGfxclkFrequencyPostDs;
    673		break;
    674	case METRICS_AVERAGE_SOCCLK:
    675		*value = metrics->AverageSocclkFrequency;
    676		break;
    677	case METRICS_AVERAGE_UCLK:
    678		*value = metrics->AverageUclkFrequencyPostDs;
    679		break;
    680	case METRICS_AVERAGE_GFXACTIVITY:
    681		*value = metrics->AverageGfxActivity;
    682		break;
    683	case METRICS_AVERAGE_MEMACTIVITY:
    684		*value = metrics->AverageUclkActivity;
    685		break;
    686	case METRICS_AVERAGE_SOCKETPOWER:
    687		*value = metrics->AverageSocketPower << 8;
    688		break;
    689	case METRICS_TEMPERATURE_EDGE:
    690		*value = metrics->TemperatureEdge *
    691			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    692		break;
    693	case METRICS_TEMPERATURE_HOTSPOT:
    694		*value = metrics->TemperatureHotspot *
    695			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    696		break;
    697	case METRICS_TEMPERATURE_MEM:
    698		*value = metrics->TemperatureMem *
    699			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    700		break;
    701	case METRICS_TEMPERATURE_VRGFX:
    702		*value = metrics->TemperatureVrGfx *
    703			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    704		break;
    705	case METRICS_TEMPERATURE_VRSOC:
    706		*value = metrics->TemperatureVrSoc *
    707			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    708		break;
    709	case METRICS_THROTTLER_STATUS:
    710		*value = metrics->ThrottlerStatus;
    711		break;
    712	case METRICS_CURR_FANSPEED:
    713		*value = metrics->CurrFanSpeed;
    714		break;
    715	default:
    716		*value = UINT_MAX;
    717		break;
    718	}
    719
    720	return ret;
    721}
    722
    723static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
    724					      MetricsMember_t member,
    725					      uint32_t *value)
    726{
    727	struct smu_table_context *smu_table = &smu->smu_table;
    728	SmuMetrics_NV12_legacy_t *metrics =
    729		(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
    730	int ret = 0;
    731
    732	ret = smu_cmn_get_metrics_table(smu,
    733					NULL,
    734					false);
    735	if (ret)
    736		return ret;
    737
    738	switch (member) {
    739	case METRICS_CURR_GFXCLK:
    740		*value = metrics->CurrClock[PPCLK_GFXCLK];
    741		break;
    742	case METRICS_CURR_SOCCLK:
    743		*value = metrics->CurrClock[PPCLK_SOCCLK];
    744		break;
    745	case METRICS_CURR_UCLK:
    746		*value = metrics->CurrClock[PPCLK_UCLK];
    747		break;
    748	case METRICS_CURR_VCLK:
    749		*value = metrics->CurrClock[PPCLK_VCLK];
    750		break;
    751	case METRICS_CURR_DCLK:
    752		*value = metrics->CurrClock[PPCLK_DCLK];
    753		break;
    754	case METRICS_CURR_DCEFCLK:
    755		*value = metrics->CurrClock[PPCLK_DCEFCLK];
    756		break;
    757	case METRICS_AVERAGE_GFXCLK:
    758		*value = metrics->AverageGfxclkFrequency;
    759		break;
    760	case METRICS_AVERAGE_SOCCLK:
    761		*value = metrics->AverageSocclkFrequency;
    762		break;
    763	case METRICS_AVERAGE_UCLK:
    764		*value = metrics->AverageUclkFrequency;
    765		break;
    766	case METRICS_AVERAGE_GFXACTIVITY:
    767		*value = metrics->AverageGfxActivity;
    768		break;
    769	case METRICS_AVERAGE_MEMACTIVITY:
    770		*value = metrics->AverageUclkActivity;
    771		break;
    772	case METRICS_AVERAGE_SOCKETPOWER:
    773		*value = metrics->AverageSocketPower << 8;
    774		break;
    775	case METRICS_TEMPERATURE_EDGE:
    776		*value = metrics->TemperatureEdge *
    777			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    778		break;
    779	case METRICS_TEMPERATURE_HOTSPOT:
    780		*value = metrics->TemperatureHotspot *
    781			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    782		break;
    783	case METRICS_TEMPERATURE_MEM:
    784		*value = metrics->TemperatureMem *
    785			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    786		break;
    787	case METRICS_TEMPERATURE_VRGFX:
    788		*value = metrics->TemperatureVrGfx *
    789			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    790		break;
    791	case METRICS_TEMPERATURE_VRSOC:
    792		*value = metrics->TemperatureVrSoc *
    793			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    794		break;
    795	case METRICS_THROTTLER_STATUS:
    796		*value = metrics->ThrottlerStatus;
    797		break;
    798	case METRICS_CURR_FANSPEED:
    799		*value = metrics->CurrFanSpeed;
    800		break;
    801	default:
    802		*value = UINT_MAX;
    803		break;
    804	}
    805
    806	return ret;
    807}
    808
    809static int navi12_get_smu_metrics_data(struct smu_context *smu,
    810				       MetricsMember_t member,
    811				       uint32_t *value)
    812{
    813	struct smu_table_context *smu_table = &smu->smu_table;
    814	SmuMetrics_NV12_t *metrics =
    815		(SmuMetrics_NV12_t *)smu_table->metrics_table;
    816	int ret = 0;
    817
    818	ret = smu_cmn_get_metrics_table(smu,
    819					NULL,
    820					false);
    821	if (ret)
    822		return ret;
    823
    824	switch (member) {
    825	case METRICS_CURR_GFXCLK:
    826		*value = metrics->CurrClock[PPCLK_GFXCLK];
    827		break;
    828	case METRICS_CURR_SOCCLK:
    829		*value = metrics->CurrClock[PPCLK_SOCCLK];
    830		break;
    831	case METRICS_CURR_UCLK:
    832		*value = metrics->CurrClock[PPCLK_UCLK];
    833		break;
    834	case METRICS_CURR_VCLK:
    835		*value = metrics->CurrClock[PPCLK_VCLK];
    836		break;
    837	case METRICS_CURR_DCLK:
    838		*value = metrics->CurrClock[PPCLK_DCLK];
    839		break;
    840	case METRICS_CURR_DCEFCLK:
    841		*value = metrics->CurrClock[PPCLK_DCEFCLK];
    842		break;
    843	case METRICS_AVERAGE_GFXCLK:
    844		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
    845			*value = metrics->AverageGfxclkFrequencyPreDs;
    846		else
    847			*value = metrics->AverageGfxclkFrequencyPostDs;
    848		break;
    849	case METRICS_AVERAGE_SOCCLK:
    850		*value = metrics->AverageSocclkFrequency;
    851		break;
    852	case METRICS_AVERAGE_UCLK:
    853		*value = metrics->AverageUclkFrequencyPostDs;
    854		break;
    855	case METRICS_AVERAGE_GFXACTIVITY:
    856		*value = metrics->AverageGfxActivity;
    857		break;
    858	case METRICS_AVERAGE_MEMACTIVITY:
    859		*value = metrics->AverageUclkActivity;
    860		break;
    861	case METRICS_AVERAGE_SOCKETPOWER:
    862		*value = metrics->AverageSocketPower << 8;
    863		break;
    864	case METRICS_TEMPERATURE_EDGE:
    865		*value = metrics->TemperatureEdge *
    866			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    867		break;
    868	case METRICS_TEMPERATURE_HOTSPOT:
    869		*value = metrics->TemperatureHotspot *
    870			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    871		break;
    872	case METRICS_TEMPERATURE_MEM:
    873		*value = metrics->TemperatureMem *
    874			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    875		break;
    876	case METRICS_TEMPERATURE_VRGFX:
    877		*value = metrics->TemperatureVrGfx *
    878			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    879		break;
    880	case METRICS_TEMPERATURE_VRSOC:
    881		*value = metrics->TemperatureVrSoc *
    882			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    883		break;
    884	case METRICS_THROTTLER_STATUS:
    885		*value = metrics->ThrottlerStatus;
    886		break;
    887	case METRICS_CURR_FANSPEED:
    888		*value = metrics->CurrFanSpeed;
    889		break;
    890	default:
    891		*value = UINT_MAX;
    892		break;
    893	}
    894
    895	return ret;
    896}
    897
    898static int navi1x_get_smu_metrics_data(struct smu_context *smu,
    899				       MetricsMember_t member,
    900				       uint32_t *value)
    901{
    902	struct amdgpu_device *adev = smu->adev;
    903	uint32_t smu_version;
    904	int ret = 0;
    905
    906	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
    907	if (ret) {
    908		dev_err(adev->dev, "Failed to get smu version!\n");
    909		return ret;
    910	}
    911
    912	switch (adev->ip_versions[MP1_HWIP][0]) {
    913	case IP_VERSION(11, 0, 9):
    914		if (smu_version > 0x00341C00)
    915			ret = navi12_get_smu_metrics_data(smu, member, value);
    916		else
    917			ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
    918		break;
    919	case IP_VERSION(11, 0, 0):
    920	case IP_VERSION(11, 0, 5):
    921	default:
    922		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
    923		      ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
    924			ret = navi10_get_smu_metrics_data(smu, member, value);
    925		else
    926			ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
    927		break;
    928	}
    929
    930	return ret;
    931}
    932
    933static int navi10_allocate_dpm_context(struct smu_context *smu)
    934{
    935	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
    936
    937	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
    938				       GFP_KERNEL);
    939	if (!smu_dpm->dpm_context)
    940		return -ENOMEM;
    941
    942	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
    943
    944	return 0;
    945}
    946
    947static int navi10_init_smc_tables(struct smu_context *smu)
    948{
    949	int ret = 0;
    950
    951	ret = navi10_tables_init(smu);
    952	if (ret)
    953		return ret;
    954
    955	ret = navi10_allocate_dpm_context(smu);
    956	if (ret)
    957		return ret;
    958
    959	return smu_v11_0_init_smc_tables(smu);
    960}
    961
    962static int navi10_set_default_dpm_table(struct smu_context *smu)
    963{
    964	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    965	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
    966	struct smu_11_0_dpm_table *dpm_table;
    967	int ret = 0;
    968
    969	/* socclk dpm table setup */
    970	dpm_table = &dpm_context->dpm_tables.soc_table;
    971	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
    972		ret = smu_v11_0_set_single_dpm_table(smu,
    973						     SMU_SOCCLK,
    974						     dpm_table);
    975		if (ret)
    976			return ret;
    977		dpm_table->is_fine_grained =
    978			!driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete;
    979	} else {
    980		dpm_table->count = 1;
    981		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
    982		dpm_table->dpm_levels[0].enabled = true;
    983		dpm_table->min = dpm_table->dpm_levels[0].value;
    984		dpm_table->max = dpm_table->dpm_levels[0].value;
    985	}
    986
    987	/* gfxclk dpm table setup */
    988	dpm_table = &dpm_context->dpm_tables.gfx_table;
    989	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
    990		ret = smu_v11_0_set_single_dpm_table(smu,
    991						     SMU_GFXCLK,
    992						     dpm_table);
    993		if (ret)
    994			return ret;
    995		dpm_table->is_fine_grained =
    996			!driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete;
    997	} else {
    998		dpm_table->count = 1;
    999		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
   1000		dpm_table->dpm_levels[0].enabled = true;
   1001		dpm_table->min = dpm_table->dpm_levels[0].value;
   1002		dpm_table->max = dpm_table->dpm_levels[0].value;
   1003	}
   1004
   1005	/* uclk dpm table setup */
   1006	dpm_table = &dpm_context->dpm_tables.uclk_table;
   1007	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
   1008		ret = smu_v11_0_set_single_dpm_table(smu,
   1009						     SMU_UCLK,
   1010						     dpm_table);
   1011		if (ret)
   1012			return ret;
   1013		dpm_table->is_fine_grained =
   1014			!driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete;
   1015	} else {
   1016		dpm_table->count = 1;
   1017		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
   1018		dpm_table->dpm_levels[0].enabled = true;
   1019		dpm_table->min = dpm_table->dpm_levels[0].value;
   1020		dpm_table->max = dpm_table->dpm_levels[0].value;
   1021	}
   1022
   1023	/* vclk dpm table setup */
   1024	dpm_table = &dpm_context->dpm_tables.vclk_table;
   1025	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
   1026		ret = smu_v11_0_set_single_dpm_table(smu,
   1027						     SMU_VCLK,
   1028						     dpm_table);
   1029		if (ret)
   1030			return ret;
   1031		dpm_table->is_fine_grained =
   1032			!driver_ppt->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete;
   1033	} else {
   1034		dpm_table->count = 1;
   1035		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
   1036		dpm_table->dpm_levels[0].enabled = true;
   1037		dpm_table->min = dpm_table->dpm_levels[0].value;
   1038		dpm_table->max = dpm_table->dpm_levels[0].value;
   1039	}
   1040
   1041	/* dclk dpm table setup */
   1042	dpm_table = &dpm_context->dpm_tables.dclk_table;
   1043	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
   1044		ret = smu_v11_0_set_single_dpm_table(smu,
   1045						     SMU_DCLK,
   1046						     dpm_table);
   1047		if (ret)
   1048			return ret;
   1049		dpm_table->is_fine_grained =
   1050			!driver_ppt->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete;
   1051	} else {
   1052		dpm_table->count = 1;
   1053		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
   1054		dpm_table->dpm_levels[0].enabled = true;
   1055		dpm_table->min = dpm_table->dpm_levels[0].value;
   1056		dpm_table->max = dpm_table->dpm_levels[0].value;
   1057	}
   1058
   1059	/* dcefclk dpm table setup */
   1060	dpm_table = &dpm_context->dpm_tables.dcef_table;
   1061	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   1062		ret = smu_v11_0_set_single_dpm_table(smu,
   1063						     SMU_DCEFCLK,
   1064						     dpm_table);
   1065		if (ret)
   1066			return ret;
   1067		dpm_table->is_fine_grained =
   1068			!driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete;
   1069	} else {
   1070		dpm_table->count = 1;
   1071		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
   1072		dpm_table->dpm_levels[0].enabled = true;
   1073		dpm_table->min = dpm_table->dpm_levels[0].value;
   1074		dpm_table->max = dpm_table->dpm_levels[0].value;
   1075	}
   1076
   1077	/* pixelclk dpm table setup */
   1078	dpm_table = &dpm_context->dpm_tables.pixel_table;
   1079	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   1080		ret = smu_v11_0_set_single_dpm_table(smu,
   1081						     SMU_PIXCLK,
   1082						     dpm_table);
   1083		if (ret)
   1084			return ret;
   1085		dpm_table->is_fine_grained =
   1086			!driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete;
   1087	} else {
   1088		dpm_table->count = 1;
   1089		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
   1090		dpm_table->dpm_levels[0].enabled = true;
   1091		dpm_table->min = dpm_table->dpm_levels[0].value;
   1092		dpm_table->max = dpm_table->dpm_levels[0].value;
   1093	}
   1094
   1095	/* displayclk dpm table setup */
   1096	dpm_table = &dpm_context->dpm_tables.display_table;
   1097	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   1098		ret = smu_v11_0_set_single_dpm_table(smu,
   1099						     SMU_DISPCLK,
   1100						     dpm_table);
   1101		if (ret)
   1102			return ret;
   1103		dpm_table->is_fine_grained =
   1104			!driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete;
   1105	} else {
   1106		dpm_table->count = 1;
   1107		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
   1108		dpm_table->dpm_levels[0].enabled = true;
   1109		dpm_table->min = dpm_table->dpm_levels[0].value;
   1110		dpm_table->max = dpm_table->dpm_levels[0].value;
   1111	}
   1112
   1113	/* phyclk dpm table setup */
   1114	dpm_table = &dpm_context->dpm_tables.phy_table;
   1115	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   1116		ret = smu_v11_0_set_single_dpm_table(smu,
   1117						     SMU_PHYCLK,
   1118						     dpm_table);
   1119		if (ret)
   1120			return ret;
   1121		dpm_table->is_fine_grained =
   1122			!driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete;
   1123	} else {
   1124		dpm_table->count = 1;
   1125		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
   1126		dpm_table->dpm_levels[0].enabled = true;
   1127		dpm_table->min = dpm_table->dpm_levels[0].value;
   1128		dpm_table->max = dpm_table->dpm_levels[0].value;
   1129	}
   1130
   1131	return 0;
   1132}
   1133
   1134static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
   1135{
   1136	int ret = 0;
   1137
   1138	if (enable) {
   1139		/* vcn dpm on is a prerequisite for vcn power gate messages */
   1140		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
   1141			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
   1142			if (ret)
   1143				return ret;
   1144		}
   1145	} else {
   1146		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
   1147			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
   1148			if (ret)
   1149				return ret;
   1150		}
   1151	}
   1152
   1153	return ret;
   1154}
   1155
   1156static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
   1157{
   1158	int ret = 0;
   1159
   1160	if (enable) {
   1161		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
   1162			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
   1163			if (ret)
   1164				return ret;
   1165		}
   1166	} else {
   1167		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
   1168			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
   1169			if (ret)
   1170				return ret;
   1171		}
   1172	}
   1173
   1174	return ret;
   1175}
   1176
   1177static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
   1178				       enum smu_clk_type clk_type,
   1179				       uint32_t *value)
   1180{
   1181	MetricsMember_t member_type;
   1182	int clk_id = 0;
   1183
   1184	clk_id = smu_cmn_to_asic_specific_index(smu,
   1185						CMN2ASIC_MAPPING_CLK,
   1186						clk_type);
   1187	if (clk_id < 0)
   1188		return clk_id;
   1189
   1190	switch (clk_id) {
   1191	case PPCLK_GFXCLK:
   1192		member_type = METRICS_CURR_GFXCLK;
   1193		break;
   1194	case PPCLK_UCLK:
   1195		member_type = METRICS_CURR_UCLK;
   1196		break;
   1197	case PPCLK_SOCCLK:
   1198		member_type = METRICS_CURR_SOCCLK;
   1199		break;
   1200	case PPCLK_VCLK:
   1201		member_type = METRICS_CURR_VCLK;
   1202		break;
   1203	case PPCLK_DCLK:
   1204		member_type = METRICS_CURR_DCLK;
   1205		break;
   1206	case PPCLK_DCEFCLK:
   1207		member_type = METRICS_CURR_DCEFCLK;
   1208		break;
   1209	default:
   1210		return -EINVAL;
   1211	}
   1212
   1213	return navi1x_get_smu_metrics_data(smu,
   1214					   member_type,
   1215					   value);
   1216}
   1217
   1218static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
   1219{
   1220	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1221	DpmDescriptor_t *dpm_desc = NULL;
   1222	uint32_t clk_index = 0;
   1223
   1224	clk_index = smu_cmn_to_asic_specific_index(smu,
   1225						   CMN2ASIC_MAPPING_CLK,
   1226						   clk_type);
   1227	dpm_desc = &pptable->DpmDescriptor[clk_index];
   1228
   1229	/* 0 - Fine grained DPM, 1 - Discrete DPM */
   1230	return dpm_desc->SnapToDiscrete == 0;
   1231}
   1232
   1233static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
   1234{
   1235	return od_table->cap[cap];
   1236}
   1237
   1238static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
   1239					enum SMU_11_0_ODSETTING_ID setting,
   1240					uint32_t *min, uint32_t *max)
   1241{
   1242	if (min)
   1243		*min = od_table->min[setting];
   1244	if (max)
   1245		*max = od_table->max[setting];
   1246}
   1247
   1248static int navi10_emit_clk_levels(struct smu_context *smu,
   1249				  enum smu_clk_type clk_type,
   1250				  char *buf,
   1251				  int *offset)
   1252{
   1253	uint16_t *curve_settings;
   1254	int ret = 0;
   1255	uint32_t cur_value = 0, value = 0;
   1256	uint32_t freq_values[3] = {0};
   1257	uint32_t i, levels, mark_index = 0, count = 0;
   1258	struct smu_table_context *table_context = &smu->smu_table;
   1259	uint32_t gen_speed, lane_width;
   1260	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
   1261	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
   1262	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
   1263	OverDriveTable_t *od_table =
   1264		(OverDriveTable_t *)table_context->overdrive_table;
   1265	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
   1266	uint32_t min_value, max_value;
   1267
   1268	switch (clk_type) {
   1269	case SMU_GFXCLK:
   1270	case SMU_SCLK:
   1271	case SMU_SOCCLK:
   1272	case SMU_MCLK:
   1273	case SMU_UCLK:
   1274	case SMU_FCLK:
   1275	case SMU_VCLK:
   1276	case SMU_DCLK:
   1277	case SMU_DCEFCLK:
   1278		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
   1279		if (ret)
   1280			return ret;
   1281
   1282		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
   1283		if (ret)
   1284			return ret;
   1285
   1286		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
   1287			for (i = 0; i < count; i++) {
   1288				ret = smu_v11_0_get_dpm_freq_by_index(smu,
   1289								      clk_type, i, &value);
   1290				if (ret)
   1291					return ret;
   1292
   1293				*offset += sysfs_emit_at(buf, *offset,
   1294						"%d: %uMhz %s\n",
   1295						i, value,
   1296						cur_value == value ? "*" : "");
   1297			}
   1298		} else {
   1299			ret = smu_v11_0_get_dpm_freq_by_index(smu,
   1300							      clk_type, 0, &freq_values[0]);
   1301			if (ret)
   1302				return ret;
   1303			ret = smu_v11_0_get_dpm_freq_by_index(smu,
   1304							      clk_type,
   1305							      count - 1,
   1306							      &freq_values[2]);
   1307			if (ret)
   1308				return ret;
   1309
   1310			freq_values[1] = cur_value;
   1311			mark_index = cur_value == freq_values[0] ? 0 :
   1312				     cur_value == freq_values[2] ? 2 : 1;
   1313
   1314			levels = 3;
   1315			if (mark_index != 1) {
   1316				levels = 2;
   1317				freq_values[1] = freq_values[2];
   1318			}
   1319
   1320			for (i = 0; i < levels; i++) {
   1321				*offset += sysfs_emit_at(buf, *offset,
   1322						"%d: %uMhz %s\n",
   1323						i, freq_values[i],
   1324						i == mark_index ? "*" : "");
   1325			}
   1326		}
   1327		break;
   1328	case SMU_PCIE:
   1329		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
   1330		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
   1331		for (i = 0; i < NUM_LINK_LEVELS; i++) {
   1332			*offset += sysfs_emit_at(buf, *offset, "%d: %s %s %dMhz %s\n", i,
   1333					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
   1334					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
   1335					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
   1336					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
   1337					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
   1338					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
   1339					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
   1340					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
   1341					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
   1342					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
   1343					pptable->LclkFreq[i],
   1344					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
   1345					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
   1346					"*" : "");
   1347		}
   1348		break;
   1349	case SMU_OD_SCLK:
   1350		if (!smu->od_enabled || !od_table || !od_settings)
   1351			return -EOPNOTSUPP;
   1352		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
   1353			break;
   1354		*offset += sysfs_emit_at(buf, *offset, "OD_SCLK:\n0: %uMhz\n1: %uMhz\n",
   1355					  od_table->GfxclkFmin, od_table->GfxclkFmax);
   1356		break;
   1357	case SMU_OD_MCLK:
   1358		if (!smu->od_enabled || !od_table || !od_settings)
   1359			return -EOPNOTSUPP;
   1360		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
   1361			break;
   1362		*offset += sysfs_emit_at(buf, *offset, "OD_MCLK:\n1: %uMHz\n", od_table->UclkFmax);
   1363		break;
   1364	case SMU_OD_VDDC_CURVE:
   1365		if (!smu->od_enabled || !od_table || !od_settings)
   1366			return -EOPNOTSUPP;
   1367		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
   1368			break;
   1369		*offset += sysfs_emit_at(buf, *offset, "OD_VDDC_CURVE:\n");
   1370		for (i = 0; i < 3; i++) {
   1371			switch (i) {
   1372			case 0:
   1373				curve_settings = &od_table->GfxclkFreq1;
   1374				break;
   1375			case 1:
   1376				curve_settings = &od_table->GfxclkFreq2;
   1377				break;
   1378			case 2:
   1379				curve_settings = &od_table->GfxclkFreq3;
   1380				break;
   1381			default:
   1382				break;
   1383			}
   1384			*offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
   1385						  i, curve_settings[0],
   1386					curve_settings[1] / NAVI10_VOLTAGE_SCALE);
   1387		}
   1388		break;
   1389	case SMU_OD_RANGE:
   1390		if (!smu->od_enabled || !od_table || !od_settings)
   1391			return -EOPNOTSUPP;
   1392		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
   1393
   1394		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
   1395			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
   1396						    &min_value, NULL);
   1397			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
   1398						    NULL, &max_value);
   1399			*offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMhz %10uMhz\n",
   1400					min_value, max_value);
   1401		}
   1402
   1403		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
   1404			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
   1405						    &min_value, &max_value);
   1406			*offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMhz %10uMhz\n",
   1407					min_value, max_value);
   1408		}
   1409
   1410		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
   1411			navi10_od_setting_get_range(od_settings,
   1412						    SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
   1413						    &min_value, &max_value);
   1414			*offset += sysfs_emit_at(buf, *offset,
   1415						 "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
   1416						 min_value, max_value);
   1417			navi10_od_setting_get_range(od_settings,
   1418						    SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
   1419						    &min_value, &max_value);
   1420			*offset += sysfs_emit_at(buf, *offset,
   1421						 "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
   1422						 min_value, max_value);
   1423			navi10_od_setting_get_range(od_settings,
   1424						    SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
   1425						    &min_value, &max_value);
   1426			*offset += sysfs_emit_at(buf, *offset,
   1427						 "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
   1428						 min_value, max_value);
   1429			navi10_od_setting_get_range(od_settings,
   1430						    SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
   1431						    &min_value, &max_value);
   1432			*offset += sysfs_emit_at(buf, *offset,
   1433						 "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
   1434						 min_value, max_value);
   1435			navi10_od_setting_get_range(od_settings,
   1436						    SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
   1437						    &min_value, &max_value);
   1438			*offset += sysfs_emit_at(buf, *offset,
   1439						 "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
   1440						 min_value, max_value);
   1441			navi10_od_setting_get_range(od_settings,
   1442						    SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
   1443						    &min_value, &max_value);
   1444			*offset += sysfs_emit_at(buf, *offset,
   1445						 "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
   1446						 min_value, max_value);
   1447		}
   1448
   1449		break;
   1450	default:
   1451		break;
   1452	}
   1453
   1454	return 0;
   1455}
   1456
   1457static int navi10_print_clk_levels(struct smu_context *smu,
   1458			enum smu_clk_type clk_type, char *buf)
   1459{
   1460	uint16_t *curve_settings;
   1461	int i, levels, size = 0, ret = 0;
   1462	uint32_t cur_value = 0, value = 0, count = 0;
   1463	uint32_t freq_values[3] = {0};
   1464	uint32_t mark_index = 0;
   1465	struct smu_table_context *table_context = &smu->smu_table;
   1466	uint32_t gen_speed, lane_width;
   1467	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
   1468	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
   1469	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
   1470	OverDriveTable_t *od_table =
   1471		(OverDriveTable_t *)table_context->overdrive_table;
   1472	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
   1473	uint32_t min_value, max_value;
   1474
   1475	smu_cmn_get_sysfs_buf(&buf, &size);
   1476
   1477	switch (clk_type) {
   1478	case SMU_GFXCLK:
   1479	case SMU_SCLK:
   1480	case SMU_SOCCLK:
   1481	case SMU_MCLK:
   1482	case SMU_UCLK:
   1483	case SMU_FCLK:
   1484	case SMU_VCLK:
   1485	case SMU_DCLK:
   1486	case SMU_DCEFCLK:
   1487		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
   1488		if (ret)
   1489			return size;
   1490
   1491		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
   1492		if (ret)
   1493			return size;
   1494
   1495		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
   1496			for (i = 0; i < count; i++) {
   1497				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
   1498				if (ret)
   1499					return size;
   1500
   1501				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
   1502						cur_value == value ? "*" : "");
   1503			}
   1504		} else {
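        			/*
        			 * Fine-grained DPM reports only the min and max levels, so
        			 * synthesize up to three entries for display: min, current and
        			 * max, collapsing to two when the current clock matches either
        			 * endpoint.
        			 */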
   1505			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
   1506			if (ret)
   1507				return size;
   1508			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
   1509			if (ret)
   1510				return size;
   1511
   1512			freq_values[1] = cur_value;
   1513			mark_index = cur_value == freq_values[0] ? 0 :
   1514				     cur_value == freq_values[2] ? 2 : 1;
   1515
   1516			levels = 3;
   1517			if (mark_index != 1) {
   1518				levels = 2;
   1519				freq_values[1] = freq_values[2];
   1520			}
   1521
   1522			for (i = 0; i < levels; i++) {
   1523				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
   1524						i == mark_index ? "*" : "");
   1525			}
   1526		}
   1527		break;
   1528	case SMU_PCIE:
   1529		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
   1530		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
   1531		for (i = 0; i < NUM_LINK_LEVELS; i++)
   1532			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
   1533					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
   1534					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
   1535					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
   1536					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
   1537					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
   1538					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
   1539					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
   1540					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
   1541					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
   1542					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
   1543					pptable->LclkFreq[i],
   1544					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
   1545					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
   1546					"*" : "");
   1547		break;
   1548	case SMU_OD_SCLK:
   1549		if (!smu->od_enabled || !od_table || !od_settings)
   1550			break;
   1551		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
   1552			break;
   1553		size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
   1554		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
   1555				      od_table->GfxclkFmin, od_table->GfxclkFmax);
   1556		break;
   1557	case SMU_OD_MCLK:
   1558		if (!smu->od_enabled || !od_table || !od_settings)
   1559			break;
   1560		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
   1561			break;
   1562		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
   1563		size += sysfs_emit_at(buf, size, "1: %uMHz\n", od_table->UclkFmax);
   1564		break;
   1565	case SMU_OD_VDDC_CURVE:
   1566		if (!smu->od_enabled || !od_table || !od_settings)
   1567			break;
   1568		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
   1569			break;
   1570		size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
   1571		for (i = 0; i < 3; i++) {
   1572			switch (i) {
   1573			case 0:
   1574				curve_settings = &od_table->GfxclkFreq1;
   1575				break;
   1576			case 1:
   1577				curve_settings = &od_table->GfxclkFreq2;
   1578				break;
   1579			case 2:
   1580				curve_settings = &od_table->GfxclkFreq3;
   1581				break;
   1582			default:
   1583				break;
   1584			}
   1585			size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
   1586					      i, curve_settings[0],
   1587					curve_settings[1] / NAVI10_VOLTAGE_SCALE);
   1588		}
   1589		break;
   1590	case SMU_OD_RANGE:
   1591		if (!smu->od_enabled || !od_table || !od_settings)
   1592			break;
   1593		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
   1594
   1595		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
   1596			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
   1597						    &min_value, NULL);
   1598			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
   1599						    NULL, &max_value);
   1600			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
   1601					min_value, max_value);
   1602		}
   1603
   1604		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
   1605			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
   1606						    &min_value, &max_value);
   1607			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
   1608					min_value, max_value);
   1609		}
   1610
   1611		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
   1612			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
   1613						    &min_value, &max_value);
   1614			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
   1615					      min_value, max_value);
   1616			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
   1617						    &min_value, &max_value);
   1618			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
   1619					      min_value, max_value);
   1620			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
   1621						    &min_value, &max_value);
   1622			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
   1623					      min_value, max_value);
   1624			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
   1625						    &min_value, &max_value);
   1626			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
   1627					      min_value, max_value);
   1628			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
   1629						    &min_value, &max_value);
   1630			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
   1631					      min_value, max_value);
   1632			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
   1633						    &min_value, &max_value);
   1634			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
   1635					      min_value, max_value);
   1636		}
   1637
   1638		break;
   1639	default:
   1640		break;
   1641	}
   1642
   1643	return size;
   1644}
   1645
   1646static int navi10_force_clk_levels(struct smu_context *smu,
   1647				   enum smu_clk_type clk_type, uint32_t mask)
   1648{
   1649
   1650	int ret = 0, size = 0;
   1651	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
   1652
   1653	soft_min_level = mask ? (ffs(mask) - 1) : 0;
   1654	soft_max_level = mask ? (fls(mask) - 1) : 0;
   1655
   1656	switch (clk_type) {
   1657	case SMU_GFXCLK:
   1658	case SMU_SCLK:
   1659	case SMU_SOCCLK:
   1660	case SMU_MCLK:
   1661	case SMU_UCLK:
   1662	case SMU_FCLK:
    1663		/* There are only 2 levels for fine-grained DPM */
   1664		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
   1665			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
   1666			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
   1667		}
   1668
   1669		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
   1670		if (ret)
   1671			return size;
   1672
   1673		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
   1674		if (ret)
   1675			return size;
   1676
   1677		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
   1678		if (ret)
   1679			return size;
   1680		break;
   1681	case SMU_DCEFCLK:
    1682		dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
   1683		break;
   1684
   1685	default:
   1686		break;
   1687	}
   1688
   1689	return size;
   1690}
   1691
   1692static int navi10_populate_umd_state_clk(struct smu_context *smu)
   1693{
   1694	struct smu_11_0_dpm_context *dpm_context =
   1695				smu->smu_dpm.dpm_context;
   1696	struct smu_11_0_dpm_table *gfx_table =
   1697				&dpm_context->dpm_tables.gfx_table;
   1698	struct smu_11_0_dpm_table *mem_table =
   1699				&dpm_context->dpm_tables.uclk_table;
   1700	struct smu_11_0_dpm_table *soc_table =
   1701				&dpm_context->dpm_tables.soc_table;
   1702	struct smu_umd_pstate_table *pstate_table =
   1703				&smu->pstate_table;
   1704	struct amdgpu_device *adev = smu->adev;
   1705	uint32_t sclk_freq;
   1706
   1707	pstate_table->gfxclk_pstate.min = gfx_table->min;
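        	/*
        	 * The peak (profile-peak) GFXCLK is SKU specific; the SKU is
        	 * identified by the MP1 IP version and the PCI revision id below.
        	 */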
   1708	switch (adev->ip_versions[MP1_HWIP][0]) {
   1709	case IP_VERSION(11, 0, 0):
   1710		switch (adev->pdev->revision) {
   1711		case 0xf0: /* XTX */
   1712		case 0xc0:
   1713			sclk_freq = NAVI10_PEAK_SCLK_XTX;
   1714			break;
   1715		case 0xf1: /* XT */
   1716		case 0xc1:
   1717			sclk_freq = NAVI10_PEAK_SCLK_XT;
   1718			break;
   1719		default: /* XL */
   1720			sclk_freq = NAVI10_PEAK_SCLK_XL;
   1721			break;
   1722		}
   1723		break;
   1724	case IP_VERSION(11, 0, 5):
   1725		switch (adev->pdev->revision) {
   1726		case 0xc7: /* XT */
   1727		case 0xf4:
   1728			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
   1729			break;
   1730		case 0xc1: /* XTM */
   1731		case 0xf2:
   1732			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
   1733			break;
   1734		case 0xc3: /* XLM */
   1735		case 0xf3:
   1736			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
   1737			break;
   1738		case 0xc5: /* XTX */
   1739		case 0xf6:
   1740			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
   1741			break;
   1742		default: /* XL */
   1743			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
   1744			break;
   1745		}
   1746		break;
   1747	case IP_VERSION(11, 0, 9):
   1748		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
   1749		break;
   1750	default:
   1751		sclk_freq = gfx_table->dpm_levels[gfx_table->count - 1].value;
   1752		break;
   1753	}
   1754	pstate_table->gfxclk_pstate.peak = sclk_freq;
   1755
   1756	pstate_table->uclk_pstate.min = mem_table->min;
   1757	pstate_table->uclk_pstate.peak = mem_table->max;
   1758
   1759	pstate_table->socclk_pstate.min = soc_table->min;
   1760	pstate_table->socclk_pstate.peak = soc_table->max;
   1761
   1762	if (gfx_table->max > NAVI10_UMD_PSTATE_PROFILING_GFXCLK &&
   1763	    mem_table->max > NAVI10_UMD_PSTATE_PROFILING_MEMCLK &&
   1764	    soc_table->max > NAVI10_UMD_PSTATE_PROFILING_SOCCLK) {
   1765		pstate_table->gfxclk_pstate.standard =
   1766			NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
   1767		pstate_table->uclk_pstate.standard =
   1768			NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
   1769		pstate_table->socclk_pstate.standard =
   1770			NAVI10_UMD_PSTATE_PROFILING_SOCCLK;
   1771	} else {
   1772		pstate_table->gfxclk_pstate.standard =
   1773			pstate_table->gfxclk_pstate.min;
   1774		pstate_table->uclk_pstate.standard =
   1775			pstate_table->uclk_pstate.min;
   1776		pstate_table->socclk_pstate.standard =
   1777			pstate_table->socclk_pstate.min;
   1778	}
   1779
   1780	return 0;
   1781}
   1782
   1783static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
   1784						 enum smu_clk_type clk_type,
   1785						 struct pp_clock_levels_with_latency *clocks)
   1786{
   1787	int ret = 0, i = 0;
   1788	uint32_t level_count = 0, freq = 0;
   1789
   1790	switch (clk_type) {
   1791	case SMU_GFXCLK:
   1792	case SMU_DCEFCLK:
   1793	case SMU_SOCCLK:
   1794	case SMU_MCLK:
   1795	case SMU_UCLK:
   1796		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
   1797		if (ret)
   1798			return ret;
   1799
   1800		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
   1801		clocks->num_levels = level_count;
   1802
   1803		for (i = 0; i < level_count; i++) {
   1804			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
   1805			if (ret)
   1806				return ret;
   1807
   1808			clocks->data[i].clocks_in_khz = freq * 1000;
   1809			clocks->data[i].latency_in_us = 0;
   1810		}
   1811		break;
   1812	default:
   1813		break;
   1814	}
   1815
   1816	return ret;
   1817}
   1818
   1819static int navi10_pre_display_config_changed(struct smu_context *smu)
   1820{
   1821	int ret = 0;
   1822	uint32_t max_freq = 0;
   1823
   1824	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
   1825	if (ret)
   1826		return ret;
   1827
   1828	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
   1829		ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
   1830		if (ret)
   1831			return ret;
   1832		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
   1833		if (ret)
   1834			return ret;
   1835	}
   1836
   1837	return ret;
   1838}
   1839
   1840static int navi10_display_config_changed(struct smu_context *smu)
   1841{
   1842	int ret = 0;
   1843
   1844	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
   1845	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
   1846	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
   1847		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
   1848						  smu->display_config->num_display,
   1849						  NULL);
   1850		if (ret)
   1851			return ret;
   1852	}
   1853
   1854	return ret;
   1855}
   1856
   1857static bool navi10_is_dpm_running(struct smu_context *smu)
   1858{
   1859	int ret = 0;
   1860	uint64_t feature_enabled;
   1861
   1862	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
   1863	if (ret)
   1864		return false;
   1865
   1866	return !!(feature_enabled & SMC_DPM_FEATURE);
   1867}
   1868
   1869static int navi10_get_fan_speed_rpm(struct smu_context *smu,
   1870				    uint32_t *speed)
   1871{
   1872	int ret = 0;
   1873
   1874	if (!speed)
   1875		return -EINVAL;
   1876
   1877	switch (smu_v11_0_get_fan_control_mode(smu)) {
   1878	case AMD_FAN_CTRL_AUTO:
   1879		ret = navi10_get_smu_metrics_data(smu,
   1880						  METRICS_CURR_FANSPEED,
   1881						  speed);
   1882		break;
   1883	default:
   1884		ret = smu_v11_0_get_fan_speed_rpm(smu,
   1885						  speed);
   1886		break;
   1887	}
   1888
   1889	return ret;
   1890}
   1891
   1892static int navi10_get_fan_parameters(struct smu_context *smu)
   1893{
   1894	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1895
   1896	smu->fan_max_rpm = pptable->FanMaximumRpm;
   1897
   1898	return 0;
   1899}
   1900
   1901static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
   1902{
   1903	DpmActivityMonitorCoeffInt_t activity_monitor;
   1904	uint32_t i, size = 0;
   1905	int16_t workload_type = 0;
   1906	static const char *title[] = {
   1907			"PROFILE_INDEX(NAME)",
   1908			"CLOCK_TYPE(NAME)",
   1909			"FPS",
   1910			"MinFreqType",
   1911			"MinActiveFreqType",
   1912			"MinActiveFreq",
   1913			"BoosterFreqType",
   1914			"BoosterFreq",
   1915			"PD_Data_limit_c",
   1916			"PD_Data_error_coeff",
   1917			"PD_Data_error_rate_coeff"};
   1918	int result = 0;
   1919
   1920	if (!buf)
   1921		return -EINVAL;
   1922
   1923	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
   1924			title[0], title[1], title[2], title[3], title[4], title[5],
   1925			title[6], title[7], title[8], title[9], title[10]);
   1926
   1927	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
   1928		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
   1929		workload_type = smu_cmn_to_asic_specific_index(smu,
   1930							       CMN2ASIC_MAPPING_WORKLOAD,
   1931							       i);
   1932		if (workload_type < 0)
   1933			return -EINVAL;
   1934
   1935		result = smu_cmn_update_table(smu,
   1936					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
   1937					  (void *)(&activity_monitor), false);
   1938		if (result) {
   1939			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
   1940			return result;
   1941		}
   1942
   1943		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
   1944			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
   1945
   1946		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1947			" ",
   1948			0,
   1949			"GFXCLK",
   1950			activity_monitor.Gfx_FPS,
   1951			activity_monitor.Gfx_MinFreqStep,
   1952			activity_monitor.Gfx_MinActiveFreqType,
   1953			activity_monitor.Gfx_MinActiveFreq,
   1954			activity_monitor.Gfx_BoosterFreqType,
   1955			activity_monitor.Gfx_BoosterFreq,
   1956			activity_monitor.Gfx_PD_Data_limit_c,
   1957			activity_monitor.Gfx_PD_Data_error_coeff,
   1958			activity_monitor.Gfx_PD_Data_error_rate_coeff);
   1959
   1960		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1961			" ",
   1962			1,
   1963			"SOCCLK",
   1964			activity_monitor.Soc_FPS,
   1965			activity_monitor.Soc_MinFreqStep,
   1966			activity_monitor.Soc_MinActiveFreqType,
   1967			activity_monitor.Soc_MinActiveFreq,
   1968			activity_monitor.Soc_BoosterFreqType,
   1969			activity_monitor.Soc_BoosterFreq,
   1970			activity_monitor.Soc_PD_Data_limit_c,
   1971			activity_monitor.Soc_PD_Data_error_coeff,
   1972			activity_monitor.Soc_PD_Data_error_rate_coeff);
   1973
   1974		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1975			" ",
   1976			2,
   1977			"MEMLK",
   1978			activity_monitor.Mem_FPS,
   1979			activity_monitor.Mem_MinFreqStep,
   1980			activity_monitor.Mem_MinActiveFreqType,
   1981			activity_monitor.Mem_MinActiveFreq,
   1982			activity_monitor.Mem_BoosterFreqType,
   1983			activity_monitor.Mem_BoosterFreq,
   1984			activity_monitor.Mem_PD_Data_limit_c,
   1985			activity_monitor.Mem_PD_Data_error_coeff,
   1986			activity_monitor.Mem_PD_Data_error_rate_coeff);
   1987	}
   1988
   1989	return size;
   1990}
   1991
   1992static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
   1993{
   1994	DpmActivityMonitorCoeffInt_t activity_monitor;
   1995	int workload_type, ret = 0;
   1996
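        	/*
        	 * The last element of @input carries the requested profile mode. For
        	 * PP_SMC_POWER_PROFILE_CUSTOM, input[0] selects the clock domain
        	 * (gfx/soc/mem) and input[1..9] supply the activity monitor
        	 * coefficients consumed below.
        	 */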
   1997	smu->power_profile_mode = input[size];
   1998
   1999	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
   2000		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
   2001		return -EINVAL;
   2002	}
   2003
   2004	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
   2005
   2006		ret = smu_cmn_update_table(smu,
   2007				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
   2008				       (void *)(&activity_monitor), false);
   2009		if (ret) {
   2010			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
   2011			return ret;
   2012		}
   2013
   2014		switch (input[0]) {
   2015		case 0: /* Gfxclk */
   2016			activity_monitor.Gfx_FPS = input[1];
   2017			activity_monitor.Gfx_MinFreqStep = input[2];
   2018			activity_monitor.Gfx_MinActiveFreqType = input[3];
   2019			activity_monitor.Gfx_MinActiveFreq = input[4];
   2020			activity_monitor.Gfx_BoosterFreqType = input[5];
   2021			activity_monitor.Gfx_BoosterFreq = input[6];
   2022			activity_monitor.Gfx_PD_Data_limit_c = input[7];
   2023			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
   2024			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
   2025			break;
   2026		case 1: /* Socclk */
   2027			activity_monitor.Soc_FPS = input[1];
   2028			activity_monitor.Soc_MinFreqStep = input[2];
   2029			activity_monitor.Soc_MinActiveFreqType = input[3];
   2030			activity_monitor.Soc_MinActiveFreq = input[4];
   2031			activity_monitor.Soc_BoosterFreqType = input[5];
   2032			activity_monitor.Soc_BoosterFreq = input[6];
   2033			activity_monitor.Soc_PD_Data_limit_c = input[7];
   2034			activity_monitor.Soc_PD_Data_error_coeff = input[8];
   2035			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
   2036			break;
   2037		case 2: /* Memlk */
   2038			activity_monitor.Mem_FPS = input[1];
   2039			activity_monitor.Mem_MinFreqStep = input[2];
   2040			activity_monitor.Mem_MinActiveFreqType = input[3];
   2041			activity_monitor.Mem_MinActiveFreq = input[4];
   2042			activity_monitor.Mem_BoosterFreqType = input[5];
   2043			activity_monitor.Mem_BoosterFreq = input[6];
   2044			activity_monitor.Mem_PD_Data_limit_c = input[7];
   2045			activity_monitor.Mem_PD_Data_error_coeff = input[8];
   2046			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
   2047			break;
   2048		}
   2049
   2050		ret = smu_cmn_update_table(smu,
   2051				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
   2052				       (void *)(&activity_monitor), true);
   2053		if (ret) {
   2054			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
   2055			return ret;
   2056		}
   2057	}
   2058
   2059	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
   2060	workload_type = smu_cmn_to_asic_specific_index(smu,
   2061						       CMN2ASIC_MAPPING_WORKLOAD,
   2062						       smu->power_profile_mode);
   2063	if (workload_type < 0)
   2064		return -EINVAL;
   2065	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
   2066				    1 << workload_type, NULL);
   2067
   2068	return ret;
   2069}
   2070
   2071static int navi10_notify_smc_display_config(struct smu_context *smu)
   2072{
   2073	struct smu_clocks min_clocks = {0};
   2074	struct pp_display_clock_request clock_req;
   2075	int ret = 0;
   2076
   2077	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
   2078	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
   2079	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
   2080
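        	/*
        	 * Note: judging from the conversions below, min_dcef_set_clk appears
        	 * to be in 10 kHz units (*10 yields kHz for the clock request, /100
        	 * yields MHz for the deep sleep DCEFCLK message).
        	 */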
   2081	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   2082		clock_req.clock_type = amd_pp_dcef_clock;
   2083		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
   2084
   2085		ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
   2086		if (!ret) {
   2087			if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
   2088				ret = smu_cmn_send_smc_msg_with_param(smu,
   2089								  SMU_MSG_SetMinDeepSleepDcefclk,
   2090								  min_clocks.dcef_clock_in_sr/100,
   2091								  NULL);
   2092				if (ret) {
    2093					dev_err(smu->adev->dev, "Attempt to set divider for DCEFCLK failed!");
   2094					return ret;
   2095				}
   2096			}
   2097		} else {
    2098			dev_info(smu->adev->dev, "Attempt to set hard min for DCEFCLK failed!");
   2099		}
   2100	}
   2101
   2102	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
   2103		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
   2104		if (ret) {
   2105			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
   2106			return ret;
   2107		}
   2108	}
   2109
   2110	return 0;
   2111}
   2112
   2113static int navi10_set_watermarks_table(struct smu_context *smu,
   2114				       struct pp_smu_wm_range_sets *clock_ranges)
   2115{
   2116	Watermarks_t *table = smu->smu_table.watermarks_table;
   2117	int ret = 0;
   2118	int i;
   2119
   2120	if (clock_ranges) {
   2121		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
   2122		    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
   2123			return -EINVAL;
   2124
   2125		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
   2126			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
   2127				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
   2128			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
   2129				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
   2130			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
   2131				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
   2132			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
   2133				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
   2134
   2135			table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
   2136				clock_ranges->reader_wm_sets[i].wm_inst;
   2137		}
   2138
   2139		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
   2140			table->WatermarkRow[WM_SOCCLK][i].MinClock =
   2141				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
   2142			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
   2143				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
   2144			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
   2145				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
   2146			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
   2147				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
   2148
   2149			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
   2150				clock_ranges->writer_wm_sets[i].wm_inst;
   2151		}
   2152
   2153		smu->watermarks_bitmap |= WATERMARKS_EXIST;
   2154	}
   2155
   2156	/* pass data to smu controller */
   2157	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
   2158	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
   2159		ret = smu_cmn_write_watermarks_table(smu);
   2160		if (ret) {
   2161			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
   2162			return ret;
   2163		}
   2164		smu->watermarks_bitmap |= WATERMARKS_LOADED;
   2165	}
   2166
   2167	return 0;
   2168}
   2169
   2170static int navi10_read_sensor(struct smu_context *smu,
   2171				 enum amd_pp_sensors sensor,
   2172				 void *data, uint32_t *size)
   2173{
   2174	int ret = 0;
   2175	struct smu_table_context *table_context = &smu->smu_table;
   2176	PPTable_t *pptable = table_context->driver_pptable;
   2177
    2178	if (!data || !size)
   2179		return -EINVAL;
   2180
   2181	switch (sensor) {
   2182	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
   2183		*(uint32_t *)data = pptable->FanMaximumRpm;
   2184		*size = 4;
   2185		break;
   2186	case AMDGPU_PP_SENSOR_MEM_LOAD:
   2187		ret = navi1x_get_smu_metrics_data(smu,
   2188						  METRICS_AVERAGE_MEMACTIVITY,
   2189						  (uint32_t *)data);
   2190		*size = 4;
   2191		break;
   2192	case AMDGPU_PP_SENSOR_GPU_LOAD:
   2193		ret = navi1x_get_smu_metrics_data(smu,
   2194						  METRICS_AVERAGE_GFXACTIVITY,
   2195						  (uint32_t *)data);
   2196		*size = 4;
   2197		break;
   2198	case AMDGPU_PP_SENSOR_GPU_POWER:
   2199		ret = navi1x_get_smu_metrics_data(smu,
   2200						  METRICS_AVERAGE_SOCKETPOWER,
   2201						  (uint32_t *)data);
   2202		*size = 4;
   2203		break;
   2204	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
   2205		ret = navi1x_get_smu_metrics_data(smu,
   2206						  METRICS_TEMPERATURE_HOTSPOT,
   2207						  (uint32_t *)data);
   2208		*size = 4;
   2209		break;
   2210	case AMDGPU_PP_SENSOR_EDGE_TEMP:
   2211		ret = navi1x_get_smu_metrics_data(smu,
   2212						  METRICS_TEMPERATURE_EDGE,
   2213						  (uint32_t *)data);
   2214		*size = 4;
   2215		break;
   2216	case AMDGPU_PP_SENSOR_MEM_TEMP:
   2217		ret = navi1x_get_smu_metrics_data(smu,
   2218						  METRICS_TEMPERATURE_MEM,
   2219						  (uint32_t *)data);
   2220		*size = 4;
   2221		break;
   2222	case AMDGPU_PP_SENSOR_GFX_MCLK:
   2223		ret = navi10_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
   2224		*(uint32_t *)data *= 100;
   2225		*size = 4;
   2226		break;
   2227	case AMDGPU_PP_SENSOR_GFX_SCLK:
   2228		ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
   2229		*(uint32_t *)data *= 100;
   2230		*size = 4;
   2231		break;
   2232	case AMDGPU_PP_SENSOR_VDDGFX:
   2233		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
   2234		*size = 4;
   2235		break;
   2236	default:
   2237		ret = -EOPNOTSUPP;
   2238		break;
   2239	}
   2240
   2241	return ret;
   2242}
   2243
   2244static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
   2245{
   2246	uint32_t num_discrete_levels = 0;
   2247	uint16_t *dpm_levels = NULL;
   2248	uint16_t i = 0;
   2249	struct smu_table_context *table_context = &smu->smu_table;
   2250	PPTable_t *driver_ppt = NULL;
   2251
   2252	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
   2253		return -EINVAL;
   2254
   2255	driver_ppt = table_context->driver_pptable;
   2256	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
   2257	dpm_levels = driver_ppt->FreqTableUclk;
   2258
   2259	if (num_discrete_levels == 0 || dpm_levels == NULL)
   2260		return -EINVAL;
   2261
   2262	*num_states = num_discrete_levels;
   2263	for (i = 0; i < num_discrete_levels; i++) {
   2264		/* convert to khz */
   2265		*clocks_in_khz = (*dpm_levels) * 1000;
   2266		clocks_in_khz++;
   2267		dpm_levels++;
   2268	}
   2269
   2270	return 0;
   2271}
   2272
   2273static int navi10_get_thermal_temperature_range(struct smu_context *smu,
   2274						struct smu_temperature_range *range)
   2275{
   2276	struct smu_table_context *table_context = &smu->smu_table;
   2277	struct smu_11_0_powerplay_table *powerplay_table =
   2278				table_context->power_play_table;
   2279	PPTable_t *pptable = smu->smu_table.driver_pptable;
   2280
   2281	if (!range)
   2282		return -EINVAL;
   2283
   2284	memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
   2285
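        	/*
        	 * Limits in the pptable are in degrees Celsius; scale them to the
        	 * finer-grained units used by struct smu_temperature_range
        	 * (millidegrees, assuming SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000).
        	 */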
   2286	range->max = pptable->TedgeLimit *
   2287		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2288	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
   2289		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2290	range->hotspot_crit_max = pptable->ThotspotLimit *
   2291		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2292	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
   2293		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2294	range->mem_crit_max = pptable->TmemLimit *
   2295		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    2296	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM) *
   2297		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2298	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
   2299
   2300	return 0;
   2301}
   2302
   2303static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
   2304						bool disable_memory_clock_switch)
   2305{
   2306	int ret = 0;
   2307	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
   2308		(struct smu_11_0_max_sustainable_clocks *)
   2309			smu->smu_table.max_sustainable_clocks;
   2310	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
   2311	uint32_t max_memory_clock = max_sustainable_clocks->uclock;
   2312
    2313	if (smu->disable_uclk_switch == disable_memory_clock_switch)
   2314		return 0;
   2315
    2316	if (disable_memory_clock_switch)
   2317		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
   2318	else
   2319		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
   2320
    2321	if (!ret)
   2322		smu->disable_uclk_switch = disable_memory_clock_switch;
   2323
   2324	return ret;
   2325}
   2326
   2327static int navi10_get_power_limit(struct smu_context *smu,
   2328				  uint32_t *current_power_limit,
   2329				  uint32_t *default_power_limit,
   2330				  uint32_t *max_power_limit)
   2331{
   2332	struct smu_11_0_powerplay_table *powerplay_table =
   2333		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
   2334	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
   2335	PPTable_t *pptable = smu->smu_table.driver_pptable;
   2336	uint32_t power_limit, od_percent;
   2337
   2338	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
   2339		/* the last hope to figure out the ppt limit */
   2340		if (!pptable) {
   2341			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
   2342			return -EINVAL;
   2343		}
   2344		power_limit =
   2345			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
   2346	}
   2347
   2348	if (current_power_limit)
   2349		*current_power_limit = power_limit;
   2350	if (default_power_limit)
   2351		*default_power_limit = power_limit;
   2352
   2353	if (max_power_limit) {
   2354		if (smu->od_enabled &&
   2355		    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
   2356			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
   2357
   2358			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
   2359
   2360			power_limit *= (100 + od_percent);
   2361			power_limit /= 100;
   2362		}
   2363
   2364		*max_power_limit = power_limit;
   2365	}
   2366
   2367	return 0;
   2368}
   2369
   2370static int navi10_update_pcie_parameters(struct smu_context *smu,
   2371				     uint32_t pcie_gen_cap,
   2372				     uint32_t pcie_width_cap)
   2373{
   2374	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
   2375	PPTable_t *pptable = smu->smu_table.driver_pptable;
   2376	uint32_t smu_pcie_arg;
   2377	int ret, i;
   2378
   2379	/* lclk dpm table setup */
   2380	for (i = 0; i < MAX_PCIE_CONF; i++) {
   2381		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
   2382		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
   2383	}
   2384
   2385	for (i = 0; i < NUM_LINK_LEVELS; i++) {
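        		/*
        		 * Message argument layout (see the packing below): link level
        		 * index in bits 23:16, capped PCIe gen in bits 15:8 and capped
        		 * lane-count code in bits 7:0.
        		 */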
   2386		smu_pcie_arg = (i << 16) |
   2387			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
   2388				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
   2389					pptable->PcieLaneCount[i] : pcie_width_cap);
   2390		ret = smu_cmn_send_smc_msg_with_param(smu,
   2391					  SMU_MSG_OverridePcieParameters,
   2392					  smu_pcie_arg,
   2393					  NULL);
   2394
   2395		if (ret)
   2396			return ret;
   2397
   2398		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
   2399			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
   2400		if (pptable->PcieLaneCount[i] > pcie_width_cap)
   2401			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
   2402	}
   2403
   2404	return 0;
   2405}
   2406
   2407static inline void navi10_dump_od_table(struct smu_context *smu,
   2408					OverDriveTable_t *od_table)
   2409{
   2410	dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
   2411	dev_dbg(smu->adev->dev, "OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
   2412	dev_dbg(smu->adev->dev, "OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
   2413	dev_dbg(smu->adev->dev, "OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
   2414	dev_dbg(smu->adev->dev, "OD: UclkFmax: %d\n", od_table->UclkFmax);
   2415	dev_dbg(smu->adev->dev, "OD: OverDrivePct: %d\n", od_table->OverDrivePct);
   2416}
   2417
   2418static int navi10_od_setting_check_range(struct smu_context *smu,
   2419					 struct smu_11_0_overdrive_table *od_table,
   2420					 enum SMU_11_0_ODSETTING_ID setting,
   2421					 uint32_t value)
   2422{
   2423	if (value < od_table->min[setting]) {
   2424		dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
   2425		return -EINVAL;
   2426	}
   2427	if (value > od_table->max[setting]) {
   2428		dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
   2429		return -EINVAL;
   2430	}
   2431	return 0;
   2432}
   2433
   2434static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
   2435						     uint16_t *voltage,
   2436						     uint32_t freq)
   2437{
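        	/*
        	 * GetVoltageByDpm argument: clock id (PPCLK_GFXCLK) in the upper
        	 * 16 bits, requested frequency in the lower 16 bits.
        	 */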
   2438	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
   2439	uint32_t value = 0;
   2440	int ret;
   2441
   2442	ret = smu_cmn_send_smc_msg_with_param(smu,
   2443					  SMU_MSG_GetVoltageByDpm,
   2444					  param,
   2445					  &value);
   2446	if (ret) {
   2447		dev_err(smu->adev->dev, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
   2448		return ret;
   2449	}
   2450
   2451	*voltage = (uint16_t)value;
   2452
   2453	return 0;
   2454}
   2455
   2456static int navi10_baco_enter(struct smu_context *smu)
   2457{
   2458	struct amdgpu_device *adev = smu->adev;
   2459
   2460	/*
    2461	 * This handles the case below:
    2462	 *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
    2463	 *
    2464	 * For NAVI10 and later ASICs, we rely on the PMFW to handle runpm. To
    2465	 * make that possible, the PMFW needs to acknowledge the dstate transition
    2466	 * for both the gfx (function 0) and audio (function 1) functions of
    2467	 * the ASIC.
    2468	 *
    2469	 * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
    2470	 * that of the device representing the audio function of the ASIC. That
    2471	 * means a runpm suspend can be kicked off on the ASIC even before the
    2472	 * sound driver (snd_hda_intel) is loaded. However, without the dstate
    2473	 * transition notification from the audio function, the PMFW cannot handle
    2474	 * BACO entry/exit correctly, which causes a driver hang on runpm
    2475	 * resume.
    2476	 *
    2477	 * To address this, we revert to the legacy message approach (the driver
    2478	 * masters the timing of BACO entry/exit) when the sound driver is missing.
   2479	 */
   2480	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
   2481		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
   2482	else
   2483		return smu_v11_0_baco_enter(smu);
   2484}
   2485
   2486static int navi10_baco_exit(struct smu_context *smu)
   2487{
   2488	struct amdgpu_device *adev = smu->adev;
   2489
   2490	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
   2491		/* Wait for PMFW handling for the Dstate change */
   2492		msleep(10);
   2493		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
   2494	} else {
   2495		return smu_v11_0_baco_exit(smu);
   2496	}
   2497}
   2498
   2499static int navi10_set_default_od_settings(struct smu_context *smu)
   2500{
   2501	OverDriveTable_t *od_table =
   2502		(OverDriveTable_t *)smu->smu_table.overdrive_table;
   2503	OverDriveTable_t *boot_od_table =
   2504		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
   2505	OverDriveTable_t *user_od_table =
   2506		(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
   2507	int ret = 0;
   2508
   2509	/*
    2510	 * For S3/S4/Runpm resume, there is no need to set up those overdrive tables again:
    2511	 *   - either they already hold the default OD settings obtained during cold boot
    2512	 *   - or they hold user-customized OD settings which must not be overwritten
   2513	 */
   2514	if (smu->adev->in_suspend)
   2515		return 0;
   2516
   2517	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)boot_od_table, false);
   2518	if (ret) {
   2519		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
   2520		return ret;
   2521	}
   2522
   2523	if (!boot_od_table->GfxclkVolt1) {
   2524		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2525								&boot_od_table->GfxclkVolt1,
   2526								boot_od_table->GfxclkFreq1);
   2527		if (ret)
   2528			return ret;
   2529	}
   2530
   2531	if (!boot_od_table->GfxclkVolt2) {
   2532		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2533								&boot_od_table->GfxclkVolt2,
   2534								boot_od_table->GfxclkFreq2);
   2535		if (ret)
   2536			return ret;
   2537	}
   2538
   2539	if (!boot_od_table->GfxclkVolt3) {
   2540		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2541								&boot_od_table->GfxclkVolt3,
   2542								boot_od_table->GfxclkFreq3);
   2543		if (ret)
   2544			return ret;
   2545	}
   2546
   2547	navi10_dump_od_table(smu, boot_od_table);
   2548
   2549	memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
   2550	memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
   2551
   2552	return 0;
   2553}
   2554
    2555static int navi10_od_edit_dpm_table(struct smu_context *smu,
        				    enum PP_OD_DPM_TABLE_COMMAND type,
        				    long input[], uint32_t size)
        {
   2556	int i;
   2557	int ret = 0;
   2558	struct smu_table_context *table_context = &smu->smu_table;
   2559	OverDriveTable_t *od_table;
   2560	struct smu_11_0_overdrive_table *od_settings;
   2561	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
   2562	uint16_t *freq_ptr, *voltage_ptr;
   2563	od_table = (OverDriveTable_t *)table_context->overdrive_table;
   2564
   2565	if (!smu->od_enabled) {
   2566		dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
   2567		return -EINVAL;
   2568	}
   2569
   2570	if (!smu->od_settings) {
   2571		dev_err(smu->adev->dev, "OD board limits are not set!\n");
   2572		return -ENOENT;
   2573	}
   2574
   2575	od_settings = smu->od_settings;
   2576
   2577	switch (type) {
   2578	case PP_OD_EDIT_SCLK_VDDC_TABLE:
   2579		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
   2580			dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
   2581			return -ENOTSUPP;
   2582		}
   2583		if (!table_context->overdrive_table) {
   2584			dev_err(smu->adev->dev, "Overdrive is not initialized\n");
   2585			return -EINVAL;
   2586		}
   2587		for (i = 0; i < size; i += 2) {
   2588			if (i + 2 > size) {
   2589				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
   2590				return -EINVAL;
   2591			}
   2592			switch (input[i]) {
   2593			case 0:
   2594				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
   2595				freq_ptr = &od_table->GfxclkFmin;
   2596				if (input[i + 1] > od_table->GfxclkFmax) {
   2597					dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
   2598						input[i + 1],
    2599						od_table->GfxclkFmax);
   2600					return -EINVAL;
   2601				}
   2602				break;
   2603			case 1:
   2604				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
   2605				freq_ptr = &od_table->GfxclkFmax;
   2606				if (input[i + 1] < od_table->GfxclkFmin) {
   2607					dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
   2608						input[i + 1],
    2609						od_table->GfxclkFmin);
   2610					return -EINVAL;
   2611				}
   2612				break;
   2613			default:
   2614				dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
   2615				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
   2616				return -EINVAL;
   2617			}
   2618			ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[i + 1]);
   2619			if (ret)
   2620				return ret;
   2621			*freq_ptr = input[i + 1];
   2622		}
   2623		break;
   2624	case PP_OD_EDIT_MCLK_VDDC_TABLE:
   2625		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
   2626			dev_warn(smu->adev->dev, "UCLK_MAX not supported!\n");
   2627			return -ENOTSUPP;
   2628		}
   2629		if (size < 2) {
   2630			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
   2631			return -EINVAL;
   2632		}
   2633		if (input[0] != 1) {
   2634			dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
   2635			dev_info(smu->adev->dev, "Supported indices: [1:max]\n");
   2636			return -EINVAL;
   2637		}
   2638		ret = navi10_od_setting_check_range(smu, od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
   2639		if (ret)
   2640			return ret;
   2641		od_table->UclkFmax = input[1];
   2642		break;
   2643	case PP_OD_RESTORE_DEFAULT_TABLE:
   2644		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
   2645			dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
   2646			return -EINVAL;
   2647		}
   2648		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
   2649		break;
   2650	case PP_OD_COMMIT_DPM_TABLE:
   2651		if (memcmp(od_table, table_context->user_overdrive_table, sizeof(OverDriveTable_t))) {
   2652			navi10_dump_od_table(smu, od_table);
   2653			ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
   2654			if (ret) {
   2655				dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
   2656				return ret;
   2657			}
   2658			memcpy(table_context->user_overdrive_table, od_table, sizeof(OverDriveTable_t));
   2659			smu->user_dpm_profile.user_od = true;
   2660
   2661			if (!memcmp(table_context->user_overdrive_table,
   2662				    table_context->boot_overdrive_table,
   2663				    sizeof(OverDriveTable_t)))
   2664				smu->user_dpm_profile.user_od = false;
   2665		}
   2666		break;
   2667	case PP_OD_EDIT_VDDC_CURVE:
   2668		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
   2669			dev_warn(smu->adev->dev, "GFXCLK_CURVE not supported!\n");
   2670			return -ENOTSUPP;
   2671		}
   2672		if (size < 3) {
   2673			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
   2674			return -EINVAL;
   2675		}
   2676		if (!od_table) {
   2677			dev_info(smu->adev->dev, "Overdrive is not initialized\n");
   2678			return -EINVAL;
   2679		}
   2680
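        		/*
        		 * input[0] selects the curve point (0..2), input[1] the frequency
        		 * and input[2] the voltage in mV; a voltage of 0 disables the
        		 * custom curve (all three voltage points are cleared).
        		 */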
   2681		switch (input[0]) {
   2682		case 0:
   2683			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
   2684			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
   2685			freq_ptr = &od_table->GfxclkFreq1;
   2686			voltage_ptr = &od_table->GfxclkVolt1;
   2687			break;
   2688		case 1:
   2689			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
   2690			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
   2691			freq_ptr = &od_table->GfxclkFreq2;
   2692			voltage_ptr = &od_table->GfxclkVolt2;
   2693			break;
   2694		case 2:
   2695			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
   2696			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
   2697			freq_ptr = &od_table->GfxclkFreq3;
   2698			voltage_ptr = &od_table->GfxclkVolt3;
   2699			break;
   2700		default:
   2701			dev_info(smu->adev->dev, "Invalid VDDC_CURVE index: %ld\n", input[0]);
   2702			dev_info(smu->adev->dev, "Supported indices: [0, 1, 2]\n");
   2703			return -EINVAL;
   2704		}
   2705		ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[1]);
   2706		if (ret)
   2707			return ret;
   2708		// Allow setting zero to disable the OverDrive VDDC curve
   2709		if (input[2] != 0) {
   2710			ret = navi10_od_setting_check_range(smu, od_settings, voltage_setting, input[2]);
   2711			if (ret)
   2712				return ret;
   2713			*freq_ptr = input[1];
   2714			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
   2715			dev_dbg(smu->adev->dev, "OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
   2716		} else {
   2717			// If setting 0, disable all voltage curve settings
   2718			od_table->GfxclkVolt1 = 0;
   2719			od_table->GfxclkVolt2 = 0;
   2720			od_table->GfxclkVolt3 = 0;
   2721		}
   2722		navi10_dump_od_table(smu, od_table);
   2723		break;
   2724	default:
   2725		return -ENOSYS;
   2726	}
   2727	return ret;
   2728}
   2729
   2730static int navi10_run_btc(struct smu_context *smu)
   2731{
   2732	int ret = 0;
   2733
   2734	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
   2735	if (ret)
   2736		dev_err(smu->adev->dev, "RunBtc failed!\n");
   2737
   2738	return ret;
   2739}
   2740
   2741static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
   2742{
   2743	struct amdgpu_device *adev = smu->adev;
   2744
   2745	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
   2746		return false;
   2747
   2748	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
   2749	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
   2750		return true;
   2751
   2752	return false;
   2753}
   2754
   2755static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
   2756{
   2757	uint32_t uclk_count, uclk_min, uclk_max;
   2758	int ret = 0;
   2759
   2760	/* This workaround can be applied only with uclk dpm enabled */
   2761	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
   2762		return 0;
   2763
   2764	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
   2765	if (ret)
   2766		return ret;
   2767
   2768	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
   2769	if (ret)
   2770		return ret;
   2771
   2772	/*
   2773	 * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750Mhz.
    2774	 * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750 MHz (0x2EE).
    2775	 * This workaround is needed only when the max uclk frequency is
    2776	 * not greater than that.
   2777	if (uclk_max > 0x2EE)
   2778		return 0;
   2779
   2780	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
   2781	if (ret)
   2782		return ret;
   2783
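        	/*
        	 * Drop the UCLK hard max to the lowest level and restore it right
        	 * away; as far as can be inferred, the intent is to force at least
        	 * one UCLK switch before re-enabling the dummy pstate change below.
        	 */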
   2784	/* Force UCLK out of the highest DPM */
   2785	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
   2786	if (ret)
   2787		return ret;
   2788
   2789	/* Revert the UCLK Hardmax */
   2790	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
   2791	if (ret)
   2792		return ret;
   2793
   2794	/*
   2795	 * In this case, SMU already disabled dummy pstate during enablement
    2796	 * of UCLK DPM, so we have to re-enable it.
   2797	 */
   2798	return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
   2799}
   2800
   2801static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
   2802{
   2803	struct smu_table_context *smu_table = &smu->smu_table;
   2804	struct smu_table *dummy_read_table =
   2805				&smu_table->dummy_read_1_table;
   2806	char *dummy_table = dummy_read_table->cpu_addr;
   2807	int ret = 0;
   2808	uint32_t i;
   2809
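        	/*
        	 * Fill the 0x40000-byte dummy-read buffer with alternating 4 KiB
        	 * PRBS7 patterns (without and with DBI); the PMFW presumably uses
        	 * this buffer as the target of its dummy p-state reads.
        	 */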
   2810	for (i = 0; i < 0x40000; i += 0x1000 * 2) {
   2811		memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
   2812		dummy_table += 0x1000;
   2813		memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
   2814		dummy_table += 0x1000;
   2815	}
   2816
   2817	amdgpu_asic_flush_hdp(smu->adev, NULL);
   2818
   2819	ret = smu_cmn_send_smc_msg_with_param(smu,
   2820					      SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
   2821					      upper_32_bits(dummy_read_table->mc_address),
   2822					      NULL);
   2823	if (ret)
   2824		return ret;
   2825
   2826	return smu_cmn_send_smc_msg_with_param(smu,
   2827					       SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
   2828					       lower_32_bits(dummy_read_table->mc_address),
   2829					       NULL);
   2830}
   2831
   2832static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
   2833{
   2834	struct amdgpu_device *adev = smu->adev;
   2835	uint8_t umc_fw_greater_than_v136 = false;
   2836	uint8_t umc_fw_disable_cdr = false;
   2837	uint32_t pmfw_version;
   2838	uint32_t param;
   2839	int ret = 0;
   2840
   2841	if (!navi10_need_umc_cdr_workaround(smu))
   2842		return 0;
   2843
   2844	ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
   2845	if (ret) {
   2846		dev_err(adev->dev, "Failed to get smu version!\n");
   2847		return ret;
   2848	}
   2849
   2850	/*
   2851	 * The messages below are only supported by Navi10 42.53.0 and later
   2852	 * PMFWs and Navi14 53.29.0 and later PMFWs.
   2853	 * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
   2854	 * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
   2855	 * - PPSMC_MSG_GetUMCFWWA
   2856	 */
   2857	if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
   2858	    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
   2859		ret = smu_cmn_send_smc_msg_with_param(smu,
   2860						      SMU_MSG_GET_UMC_FW_WA,
   2861						      0,
   2862						      &param);
   2863		if (ret)
   2864			return ret;
   2865
    2866		/* First bit indicates if the UMC f/w is newer than v136 */
   2867		umc_fw_greater_than_v136 = param & 0x1;
   2868
   2869		/* Second bit indicates if hybrid-cdr is disabled */
   2870		umc_fw_disable_cdr = param & 0x2;
   2871
   2872		/* w/a only allowed if UMC f/w is <= 136 */
   2873		if (umc_fw_greater_than_v136)
   2874			return 0;
   2875
   2876		if (umc_fw_disable_cdr) {
   2877			if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
   2878				return navi10_umc_hybrid_cdr_workaround(smu);
   2879		} else {
   2880			return navi10_set_dummy_pstates_table_location(smu);
   2881		}
   2882	} else {
   2883		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
   2884			return navi10_umc_hybrid_cdr_workaround(smu);
   2885	}
   2886
   2887	return 0;
   2888}
   2889
   2890static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
   2891					     void **table)
   2892{
   2893	struct smu_table_context *smu_table = &smu->smu_table;
   2894	struct gpu_metrics_v1_3 *gpu_metrics =
   2895		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
   2896	SmuMetrics_legacy_t metrics;
   2897	int ret = 0;
   2898
   2899	ret = smu_cmn_get_metrics_table(smu,
   2900					NULL,
   2901					true);
   2902	if (ret)
   2903		return ret;
   2904
   2905	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
   2906
   2907	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
   2908
   2909	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
   2910	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
   2911	gpu_metrics->temperature_mem = metrics.TemperatureMem;
   2912	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
   2913	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
   2914	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
   2915
   2916	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
   2917	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
   2918
   2919	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
   2920
   2921	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
   2922	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
   2923	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
   2924
   2925	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
   2926	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
   2927	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
   2928	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
   2929	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
   2930
   2931	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
   2932	gpu_metrics->indep_throttle_status =
   2933			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
   2934							   navi1x_throttler_map);
   2935
   2936	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
   2937
   2938	gpu_metrics->pcie_link_width =
   2939			smu_v11_0_get_current_pcie_link_width(smu);
   2940	gpu_metrics->pcie_link_speed =
   2941			smu_v11_0_get_current_pcie_link_speed(smu);
   2942
   2943	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
   2944
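        	/*
        	 * Decode the reported voltage offsets: 1550 mV minus 6.25 mV per
        	 * step, computed in 0.01 mV units to stay in integer arithmetic.
        	 */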
   2945	if (metrics.CurrGfxVoltageOffset)
   2946		gpu_metrics->voltage_gfx =
   2947			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
   2948	if (metrics.CurrMemVidOffset)
   2949		gpu_metrics->voltage_mem =
   2950			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
   2951	if (metrics.CurrSocVoltageOffset)
   2952		gpu_metrics->voltage_soc =
   2953			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
   2954
   2955	*table = (void *)gpu_metrics;
   2956
   2957	return sizeof(struct gpu_metrics_v1_3);
   2958}
   2959
   2960static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
   2961			   struct i2c_msg *msg, int num_msgs)
   2962{
   2963	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
   2964	struct amdgpu_device *adev = smu_i2c->adev;
   2965	struct smu_context *smu = adev->powerplay.pp_handle;
   2966	struct smu_table_context *smu_table = &smu->smu_table;
   2967	struct smu_table *table = &smu_table->driver_table;
   2968	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
   2969	int i, j, r, c;
   2970	u16 dir;
   2971
   2972	if (!adev->pm.dpm_enabled)
   2973		return -EBUSY;
   2974
   2975	req = kzalloc(sizeof(*req), GFP_KERNEL);
   2976	if (!req)
   2977		return -ENOMEM;
   2978
   2979	req->I2CcontrollerPort = smu_i2c->port;
   2980	req->I2CSpeed = I2C_SPEED_FAST_400K;
   2981	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
   2982	dir = msg[0].flags & I2C_M_RD;
   2983
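        	/*
        	 * Pack every byte of every message into a single SwI2cRequest;
        	 * the SMU then executes the whole transaction on our behalf.
        	 */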
   2984	for (c = i = 0; i < num_msgs; i++) {
   2985		for (j = 0; j < msg[i].len; j++, c++) {
   2986			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
   2987
   2988			if (!(msg[i].flags & I2C_M_RD)) {
   2989				/* write */
   2990				cmd->Cmd = I2C_CMD_WRITE;
   2991				cmd->RegisterAddr = msg[i].buf[j];
   2992			}
   2993
   2994			if ((dir ^ msg[i].flags) & I2C_M_RD) {
    2995				/* The direction changed: request a RESTART
    2996				 * before this command. */
   2997				dir = msg[i].flags & I2C_M_RD;
   2998				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
   2999			}
   3000
   3001			req->NumCmds++;
   3002
    3003			/*
    3004			 * Insert a STOP if this is the last byte of the last
    3005			 * message in the transaction, or the last byte of a
    3006			 * message for which the client requested an explicit STOP.
    3007			 */
   3008			if ((j == msg[i].len - 1) &&
   3009			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
   3010				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
   3011				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
   3012			}
   3013		}
   3014	}
   3015	mutex_lock(&adev->pm.mutex);
   3016	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
   3017	mutex_unlock(&adev->pm.mutex);
   3018	if (r)
   3019		goto fail;
   3020
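        	/* Copy the data the SMU read back into the caller's buffers. */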
   3021	for (c = i = 0; i < num_msgs; i++) {
   3022		if (!(msg[i].flags & I2C_M_RD)) {
   3023			c += msg[i].len;
   3024			continue;
   3025		}
   3026		for (j = 0; j < msg[i].len; j++, c++) {
   3027			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
   3028
   3029			msg[i].buf[j] = cmd->Data;
   3030		}
   3031	}
   3032	r = num_msgs;
   3033fail:
   3034	kfree(req);
   3035	return r;
   3036}
   3037
   3038static u32 navi10_i2c_func(struct i2c_adapter *adap)
   3039{
   3040	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
   3041}
   3042
   3043
   3044static const struct i2c_algorithm navi10_i2c_algo = {
   3045	.master_xfer = navi10_i2c_xfer,
   3046	.functionality = navi10_i2c_func,
   3047};
   3048
   3049static const struct i2c_adapter_quirks navi10_i2c_control_quirks = {
   3050	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
   3051	.max_read_len  = MAX_SW_I2C_COMMANDS,
   3052	.max_write_len = MAX_SW_I2C_COMMANDS,
   3053	.max_comb_1st_msg_len = 2,
   3054	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
   3055};
   3056
   3057static int navi10_i2c_control_init(struct smu_context *smu)
   3058{
   3059	struct amdgpu_device *adev = smu->adev;
   3060	int res, i;
   3061
   3062	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
   3063		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
   3064		struct i2c_adapter *control = &smu_i2c->adapter;
   3065
   3066		smu_i2c->adev = adev;
   3067		smu_i2c->port = i;
   3068		mutex_init(&smu_i2c->mutex);
   3069		control->owner = THIS_MODULE;
   3070		control->class = I2C_CLASS_HWMON;
   3071		control->dev.parent = &adev->pdev->dev;
   3072		control->algo = &navi10_i2c_algo;
   3073		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
   3074		control->quirks = &navi10_i2c_control_quirks;
   3075		i2c_set_adapdata(control, smu_i2c);
   3076
   3077		res = i2c_add_adapter(control);
   3078		if (res) {
   3079			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
   3080			goto Out_err;
   3081		}
   3082	}
   3083
   3084	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
   3085	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
   3086
   3087	return 0;
   3088Out_err:
   3089	for ( ; i >= 0; i--) {
   3090		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
   3091		struct i2c_adapter *control = &smu_i2c->adapter;
   3092
   3093		i2c_del_adapter(control);
   3094	}
   3095	return res;
   3096}
   3097
   3098static void navi10_i2c_control_fini(struct smu_context *smu)
   3099{
   3100	struct amdgpu_device *adev = smu->adev;
   3101	int i;
   3102
   3103	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
   3104		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
   3105		struct i2c_adapter *control = &smu_i2c->adapter;
   3106
   3107		i2c_del_adapter(control);
   3108	}
   3109	adev->pm.ras_eeprom_i2c_bus = NULL;
   3110	adev->pm.fru_eeprom_i2c_bus = NULL;
   3111}
   3112
   3113static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
   3114				      void **table)
   3115{
   3116	struct smu_table_context *smu_table = &smu->smu_table;
   3117	struct gpu_metrics_v1_3 *gpu_metrics =
   3118		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
   3119	SmuMetrics_t metrics;
   3120	int ret = 0;
   3121
   3122	ret = smu_cmn_get_metrics_table(smu,
   3123					NULL,
   3124					true);
   3125	if (ret)
   3126		return ret;
   3127
   3128	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
   3129
   3130	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
   3131
   3132	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
   3133	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
   3134	gpu_metrics->temperature_mem = metrics.TemperatureMem;
   3135	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
   3136	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
   3137	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
   3138
   3139	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
   3140	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
   3141
   3142	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
   3143
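        	/*
        	 * Report the pre-deep-sleep gfxclk average while the GPU is
        	 * busy, the post-deep-sleep average otherwise.
        	 */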
   3144	if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
   3145		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
   3146	else
   3147		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
   3148
   3149	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
   3150	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
   3151
   3152	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
   3153	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
   3154	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
   3155	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
   3156	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
   3157
   3158	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
   3159	gpu_metrics->indep_throttle_status =
   3160			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
   3161							   navi1x_throttler_map);
   3162
   3163	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
   3164
   3165	gpu_metrics->pcie_link_width = metrics.PcieWidth;
   3166	gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];
   3167
   3168	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
   3169
   3170	if (metrics.CurrGfxVoltageOffset)
   3171		gpu_metrics->voltage_gfx =
   3172			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
   3173	if (metrics.CurrMemVidOffset)
   3174		gpu_metrics->voltage_mem =
   3175			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
   3176	if (metrics.CurrSocVoltageOffset)
   3177		gpu_metrics->voltage_soc =
   3178			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
   3179
   3180	*table = (void *)gpu_metrics;
   3181
   3182	return sizeof(struct gpu_metrics_v1_3);
   3183}
   3184
   3185static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
   3186					     void **table)
   3187{
   3188	struct smu_table_context *smu_table = &smu->smu_table;
   3189	struct gpu_metrics_v1_3 *gpu_metrics =
   3190		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
   3191	SmuMetrics_NV12_legacy_t metrics;
   3192	int ret = 0;
   3193
   3194	ret = smu_cmn_get_metrics_table(smu,
   3195					NULL,
   3196					true);
   3197	if (ret)
   3198		return ret;
   3199
   3200	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
   3201
   3202	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
   3203
   3204	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
   3205	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
   3206	gpu_metrics->temperature_mem = metrics.TemperatureMem;
   3207	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
   3208	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
   3209	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
   3210
   3211	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
   3212	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
   3213
   3214	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
   3215
   3216	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
   3217	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
   3218	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
   3219
   3220	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
   3221	gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
   3222	gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
   3223	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
   3224
   3225	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
   3226	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
   3227	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
   3228	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
   3229	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
   3230
   3231	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
   3232	gpu_metrics->indep_throttle_status =
   3233			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
   3234							   navi1x_throttler_map);
   3235
   3236	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
   3237
   3238	gpu_metrics->pcie_link_width =
   3239			smu_v11_0_get_current_pcie_link_width(smu);
   3240	gpu_metrics->pcie_link_speed =
   3241			smu_v11_0_get_current_pcie_link_speed(smu);
   3242
   3243	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
   3244
   3245	if (metrics.CurrGfxVoltageOffset)
   3246		gpu_metrics->voltage_gfx =
   3247			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
   3248	if (metrics.CurrMemVidOffset)
   3249		gpu_metrics->voltage_mem =
   3250			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
   3251	if (metrics.CurrSocVoltageOffset)
   3252		gpu_metrics->voltage_soc =
   3253			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
   3254
   3255	*table = (void *)gpu_metrics;
   3256
   3257	return sizeof(struct gpu_metrics_v1_3);
   3258}
   3259
   3260static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
   3261				      void **table)
   3262{
   3263	struct smu_table_context *smu_table = &smu->smu_table;
   3264	struct gpu_metrics_v1_3 *gpu_metrics =
   3265		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
   3266	SmuMetrics_NV12_t metrics;
   3267	int ret = 0;
   3268
   3269	ret = smu_cmn_get_metrics_table(smu,
   3270					NULL,
   3271					true);
   3272	if (ret)
   3273		return ret;
   3274
   3275	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
   3276
   3277	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
   3278
   3279	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
   3280	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
   3281	gpu_metrics->temperature_mem = metrics.TemperatureMem;
   3282	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
   3283	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
   3284	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
   3285
   3286	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
   3287	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
   3288
   3289	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
   3290
   3291	if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
   3292		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
   3293	else
   3294		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
   3295
   3296	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
   3297	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
   3298
   3299	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
   3300	gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
   3301	gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
   3302	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
   3303
   3304	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
   3305	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
   3306	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
   3307	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
   3308	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
   3309
   3310	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
   3311	gpu_metrics->indep_throttle_status =
   3312			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
   3313							   navi1x_throttler_map);
   3314
   3315	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
   3316
   3317	gpu_metrics->pcie_link_width = metrics.PcieWidth;
   3318	gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];
   3319
   3320	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
   3321
   3322	if (metrics.CurrGfxVoltageOffset)
   3323		gpu_metrics->voltage_gfx =
   3324			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
   3325	if (metrics.CurrMemVidOffset)
   3326		gpu_metrics->voltage_mem =
   3327			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
   3328	if (metrics.CurrSocVoltageOffset)
   3329		gpu_metrics->voltage_soc =
   3330			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
   3331
   3332	*table = (void *)gpu_metrics;
   3333
   3334	return sizeof(struct gpu_metrics_v1_3);
   3335}
   3336
   3337static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
   3338				      void **table)
   3339{
   3340	struct amdgpu_device *adev = smu->adev;
   3341	uint32_t smu_version;
   3342	int ret = 0;
   3343
   3344	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
   3345	if (ret) {
   3346		dev_err(adev->dev, "Failed to get smu version!\n");
   3347		return ret;
   3348	}
   3349
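        	/*
        	 * Older PMFW exposes the legacy metrics layout; pick the
        	 * matching parser based on the running firmware version.
        	 */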
   3350	switch (adev->ip_versions[MP1_HWIP][0]) {
   3351	case IP_VERSION(11, 0, 9):
   3352		if (smu_version > 0x00341C00)
   3353			ret = navi12_get_gpu_metrics(smu, table);
   3354		else
   3355			ret = navi12_get_legacy_gpu_metrics(smu, table);
   3356		break;
   3357	case IP_VERSION(11, 0, 0):
   3358	case IP_VERSION(11, 0, 5):
   3359	default:
   3360		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
   3361		      ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
   3362			ret = navi10_get_gpu_metrics(smu, table);
   3363		else
    3364			ret = navi10_get_legacy_gpu_metrics(smu, table);
   3365		break;
   3366	}
   3367
   3368	return ret;
   3369}
   3370
   3371static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
   3372{
   3373	struct smu_table_context *table_context = &smu->smu_table;
   3374	PPTable_t *smc_pptable = table_context->driver_pptable;
   3375	struct amdgpu_device *adev = smu->adev;
   3376	uint32_t param = 0;
   3377
   3378	/* Navi12 does not support this */
   3379	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
   3380		return 0;
   3381
   3382	/*
   3383	 * Skip the MGpuFanBoost setting for those ASICs
   3384	 * which do not support it
   3385	 */
   3386	if (!smc_pptable->MGpuFanBoostLimitRpm)
   3387		return 0;
   3388
   3389	/* Workaround for WS SKU */
   3390	if (adev->pdev->device == 0x7312 &&
   3391	    adev->pdev->revision == 0)
   3392		param = 0xD188;
   3393
   3394	return smu_cmn_send_smc_msg_with_param(smu,
   3395					       SMU_MSG_SetMGpuFanBoostLimitRpm,
   3396					       param,
   3397					       NULL);
   3398}
   3399
   3400static int navi10_post_smu_init(struct smu_context *smu)
   3401{
   3402	struct amdgpu_device *adev = smu->adev;
   3403	int ret = 0;
   3404
   3405	if (amdgpu_sriov_vf(adev))
   3406		return 0;
   3407
   3408	ret = navi10_run_umc_cdr_workaround(smu);
   3409	if (ret) {
   3410		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
   3411		return ret;
   3412	}
   3413
   3414	if (!smu->dc_controlled_by_gpio) {
   3415		/*
   3416		 * For Navi1X, manually switch it to AC mode as PMFW
   3417		 * may boot it with DC mode.
   3418		 */
   3419		ret = smu_v11_0_set_power_source(smu,
   3420						 adev->pm.ac_power ?
   3421						 SMU_POWER_SOURCE_AC :
   3422						 SMU_POWER_SOURCE_DC);
   3423		if (ret) {
   3424			dev_err(adev->dev, "Failed to switch to %s mode!\n",
   3425					adev->pm.ac_power ? "AC" : "DC");
   3426			return ret;
   3427		}
   3428	}
   3429
   3430	return ret;
   3431}
   3432
   3433static int navi10_get_default_config_table_settings(struct smu_context *smu,
   3434						    struct config_table_setting *table)
   3435{
   3436	if (!table)
   3437		return -EINVAL;
   3438
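        	/*
        	 * Defaults for the LpfTau averaging constants consumed by
        	 * navi10_set_config_table().
        	 */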
   3439	table->gfxclk_average_tau = 10;
   3440	table->socclk_average_tau = 10;
   3441	table->uclk_average_tau = 10;
   3442	table->gfx_activity_average_tau = 10;
   3443	table->mem_activity_average_tau = 10;
   3444	table->socket_power_average_tau = 10;
   3445
   3446	return 0;
   3447}
   3448
   3449static int navi10_set_config_table(struct smu_context *smu,
   3450				   struct config_table_setting *table)
   3451{
   3452	DriverSmuConfig_t driver_smu_config_table;
   3453
   3454	if (!table)
   3455		return -EINVAL;
   3456
   3457	memset(&driver_smu_config_table,
   3458	       0,
   3459	       sizeof(driver_smu_config_table));
   3460
   3461	driver_smu_config_table.GfxclkAverageLpfTau =
   3462				table->gfxclk_average_tau;
   3463	driver_smu_config_table.SocclkAverageLpfTau =
   3464				table->socclk_average_tau;
   3465	driver_smu_config_table.UclkAverageLpfTau =
   3466				table->uclk_average_tau;
   3467	driver_smu_config_table.GfxActivityLpfTau =
   3468				table->gfx_activity_average_tau;
   3469	driver_smu_config_table.UclkActivityLpfTau =
   3470				table->mem_activity_average_tau;
   3471	driver_smu_config_table.SocketPowerLpfTau =
   3472				table->socket_power_average_tau;
   3473
   3474	return smu_cmn_update_table(smu,
   3475				    SMU_TABLE_DRIVER_SMU_CONFIG,
   3476				    0,
   3477				    (void *)&driver_smu_config_table,
   3478				    true);
   3479}
   3480
   3481static const struct pptable_funcs navi10_ppt_funcs = {
   3482	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
   3483	.set_default_dpm_table = navi10_set_default_dpm_table,
   3484	.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
   3485	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
   3486	.i2c_init = navi10_i2c_control_init,
   3487	.i2c_fini = navi10_i2c_control_fini,
   3488	.print_clk_levels = navi10_print_clk_levels,
   3489	.emit_clk_levels = navi10_emit_clk_levels,
   3490	.force_clk_levels = navi10_force_clk_levels,
   3491	.populate_umd_state_clk = navi10_populate_umd_state_clk,
   3492	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
   3493	.pre_display_config_changed = navi10_pre_display_config_changed,
   3494	.display_config_changed = navi10_display_config_changed,
   3495	.notify_smc_display_config = navi10_notify_smc_display_config,
   3496	.is_dpm_running = navi10_is_dpm_running,
   3497	.get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
   3498	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
   3499	.get_power_profile_mode = navi10_get_power_profile_mode,
   3500	.set_power_profile_mode = navi10_set_power_profile_mode,
   3501	.set_watermarks_table = navi10_set_watermarks_table,
   3502	.read_sensor = navi10_read_sensor,
   3503	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
   3504	.set_performance_level = smu_v11_0_set_performance_level,
   3505	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
   3506	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
   3507	.get_power_limit = navi10_get_power_limit,
   3508	.update_pcie_parameters = navi10_update_pcie_parameters,
   3509	.init_microcode = smu_v11_0_init_microcode,
   3510	.load_microcode = smu_v11_0_load_microcode,
   3511	.fini_microcode = smu_v11_0_fini_microcode,
   3512	.init_smc_tables = navi10_init_smc_tables,
   3513	.fini_smc_tables = smu_v11_0_fini_smc_tables,
   3514	.init_power = smu_v11_0_init_power,
   3515	.fini_power = smu_v11_0_fini_power,
   3516	.check_fw_status = smu_v11_0_check_fw_status,
   3517	.setup_pptable = navi10_setup_pptable,
   3518	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
   3519	.check_fw_version = smu_v11_0_check_fw_version,
   3520	.write_pptable = smu_cmn_write_pptable,
   3521	.set_driver_table_location = smu_v11_0_set_driver_table_location,
   3522	.set_tool_table_location = smu_v11_0_set_tool_table_location,
   3523	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
   3524	.system_features_control = smu_v11_0_system_features_control,
   3525	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
   3526	.send_smc_msg = smu_cmn_send_smc_msg,
   3527	.init_display_count = smu_v11_0_init_display_count,
   3528	.set_allowed_mask = smu_v11_0_set_allowed_mask,
   3529	.get_enabled_mask = smu_cmn_get_enabled_mask,
   3530	.feature_is_enabled = smu_cmn_feature_is_enabled,
   3531	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
   3532	.notify_display_change = smu_v11_0_notify_display_change,
   3533	.set_power_limit = smu_v11_0_set_power_limit,
   3534	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
   3535	.enable_thermal_alert = smu_v11_0_enable_thermal_alert,
   3536	.disable_thermal_alert = smu_v11_0_disable_thermal_alert,
   3537	.set_min_dcef_deep_sleep = smu_v11_0_set_min_deep_sleep_dcefclk,
   3538	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
   3539	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
   3540	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
   3541	.set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
   3542	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
   3543	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
   3544	.gfx_off_control = smu_v11_0_gfx_off_control,
   3545	.register_irq_handler = smu_v11_0_register_irq_handler,
   3546	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
   3547	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
   3548	.baco_is_support = smu_v11_0_baco_is_support,
   3549	.baco_get_state = smu_v11_0_baco_get_state,
   3550	.baco_set_state = smu_v11_0_baco_set_state,
   3551	.baco_enter = navi10_baco_enter,
   3552	.baco_exit = navi10_baco_exit,
   3553	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
   3554	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
   3555	.set_default_od_settings = navi10_set_default_od_settings,
   3556	.od_edit_dpm_table = navi10_od_edit_dpm_table,
   3557	.restore_user_od_settings = smu_v11_0_restore_user_od_settings,
   3558	.run_btc = navi10_run_btc,
   3559	.set_power_source = smu_v11_0_set_power_source,
   3560	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
   3561	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
   3562	.get_gpu_metrics = navi1x_get_gpu_metrics,
   3563	.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
   3564	.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
   3565	.deep_sleep_control = smu_v11_0_deep_sleep_control,
   3566	.get_fan_parameters = navi10_get_fan_parameters,
   3567	.post_init = navi10_post_smu_init,
   3568	.interrupt_work = smu_v11_0_interrupt_work,
   3569	.set_mp1_state = smu_cmn_set_mp1_state,
   3570	.get_default_config_table_settings = navi10_get_default_config_table_settings,
   3571	.set_config_table = navi10_set_config_table,
   3572};
   3573
   3574void navi10_set_ppt_funcs(struct smu_context *smu)
   3575{
   3576	smu->ppt_funcs = &navi10_ppt_funcs;
   3577	smu->message_map = navi10_message_map;
   3578	smu->clock_map = navi10_clk_map;
   3579	smu->feature_map = navi10_feature_mask_map;
   3580	smu->table_map = navi10_table_map;
   3581	smu->pwr_src_map = navi10_pwr_src_map;
   3582	smu->workload_map = navi10_workload_map;
   3583}