cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ci_smumgr.c (99157B)


      1/*
      2 * Copyright 2017 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23#include <linux/module.h>
     24#include <linux/slab.h>
     25#include <linux/fb.h>
      26#include <linux/delay.h>
     27#include <linux/types.h>
     28#include <linux/pci.h>
     29
     30#include "smumgr.h"
     31#include "pp_debug.h"
     32#include "ci_smumgr.h"
     33#include "ppsmc.h"
     34#include "smu7_hwmgr.h"
     35#include "hardwaremanager.h"
     36#include "ppatomctrl.h"
     37#include "cgs_common.h"
     38#include "atombios.h"
     39#include "pppcielanes.h"
     40#include "smu7_smumgr.h"
     41
     42#include "smu/smu_7_0_1_d.h"
     43#include "smu/smu_7_0_1_sh_mask.h"
     44
     45#include "dce/dce_8_0_d.h"
     46#include "dce/dce_8_0_sh_mask.h"
     47
     48#include "bif/bif_4_1_d.h"
     49#include "bif/bif_4_1_sh_mask.h"
     50
     51#include "gca/gfx_7_2_d.h"
     52#include "gca/gfx_7_2_sh_mask.h"
     53
     54#include "gmc/gmc_7_1_d.h"
     55#include "gmc/gmc_7_1_sh_mask.h"
     56
     57#include "processpptables.h"
     58
     59#define MC_CG_ARB_FREQ_F0           0x0a
     60#define MC_CG_ARB_FREQ_F1           0x0b
     61#define MC_CG_ARB_FREQ_F2           0x0c
     62#define MC_CG_ARB_FREQ_F3           0x0d
     63
     64#define SMC_RAM_END 0x40000
     65
     66#define CISLAND_MINIMUM_ENGINE_CLOCK 800
     67#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
     68
     69static const struct ci_pt_defaults defaults_hawaii_xt = {
     70	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
     71	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
     72	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
     73};
     74
     75static const struct ci_pt_defaults defaults_hawaii_pro = {
     76	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
     77	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
     78	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
     79};
     80
     81static const struct ci_pt_defaults defaults_bonaire_xt = {
     82	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
     83	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
     84	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
     85};
     86
     87
     88static const struct ci_pt_defaults defaults_saturn_xt = {
     89	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
     90	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
     91	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
     92};
     93
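/*
 * Sketch of how the positional initializers above line up with struct
 * ci_pt_defaults, judging from the defaults-> accesses later in this file
 * (the authoritative layout lives in ci_smumgr.h; the display_cac slot is
 * an assumption):
 *
 *   { svi_load_line_en, svi_load_line_vddc,
 *     tdc_vddc_throttle_release_limit_perc, tdc_mawt, tdc_waterfall_ctl,
 *     dte_ambient_temp_base, display_cac, bapm_temp_gradient,
 *     bapmti_r[], bapmti_rc[] }
 *
 * So for defaults_hawaii_xt: svi_load_line_en = 1, svi_load_line_vddc = 0xF,
 * tdc_vddc_throttle_release_limit_perc = 0xFD, tdc_mawt = 0x19,
 * tdc_waterfall_ctl = 5 and bapm_temp_gradient = 0xB0000.
 */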
     94
     95static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
     96					uint32_t smc_addr, uint32_t limit)
     97{
     98	if ((0 != (3 & smc_addr))
     99		|| ((smc_addr + 3) >= limit)) {
     100	pr_err("smc_addr invalid\n");
    101		return -EINVAL;
    102	}
    103
    104	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
    105	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
    106	return 0;
    107}
    108
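/*
 * Note: SMC SRAM is not memory-mapped directly; it is reached through the
 * mmSMC_IND_INDEX_0 / mmSMC_IND_DATA_0 index/data register pair, with
 * auto-increment disabled above. A dword read at an aligned SMC address is
 * therefore just:
 *
 *   ci_set_smc_sram_address(hwmgr, addr, SMC_RAM_END);
 *   value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
 *
 * which is exactly what ci_read_smc_sram_dword() below does.
 */
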
    109static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
    110				const uint8_t *src, uint32_t byte_count, uint32_t limit)
    111{
    112	int result;
    113	uint32_t data = 0;
    114	uint32_t original_data;
    115	uint32_t addr = 0;
    116	uint32_t extra_shift;
    117
    118	if ((3 & smc_start_address)
    119		|| ((smc_start_address + byte_count) >= limit)) {
     120		pr_err("smc_start_address invalid\n");
    121		return -EINVAL;
    122	}
    123
    124	addr = smc_start_address;
    125
    126	while (byte_count >= 4) {
     127		/* Bytes are written into the SMC address space with the MSB first. */
    128		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
    129
    130		result = ci_set_smc_sram_address(hwmgr, addr, limit);
    131
    132		if (0 != result)
    133			return result;
    134
    135		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
    136
    137		src += 4;
    138		byte_count -= 4;
    139		addr += 4;
    140	}
    141
    142	if (0 != byte_count) {
    143
    144		data = 0;
    145
    146		result = ci_set_smc_sram_address(hwmgr, addr, limit);
    147
    148		if (0 != result)
    149			return result;
    150
    151
    152		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
    153
    154		extra_shift = 8 * (4 - byte_count);
    155
    156		while (byte_count > 0) {
     157			/* Bytes are written into the SMC address space with the MSB first. */
    158			data = (0x100 * data) + *src++;
    159			byte_count--;
    160		}
    161
    162		data <<= extra_shift;
    163
    164		data |= (original_data & ~((~0UL) << extra_shift));
    165
    166		result = ci_set_smc_sram_address(hwmgr, addr, limit);
    167
    168		if (0 != result)
    169			return result;
    170
    171		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
    172	}
    173
    174	return 0;
    175}
    176
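/*
 * Worked example of the partial-word tail above, assuming byte_count = 2
 * with src bytes {0xAA, 0xBB} and original_data = 0x11223344:
 *
 *   extra_shift = 8 * (4 - 2)       = 16
 *   data        = 0xAA -> 0xAABB      (MSB-first accumulation)
 *   data      <<= 16                -> 0xAABB0000
 *   mask        = ~(~0UL << 16)     = 0x0000FFFF
 *   data       |= 0x11223344 & mask -> 0xAABB3344
 *
 * i.e. the two new bytes land in the high half of the dword while the low
 * half of the existing SMC word is preserved.
 */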
    177
    178static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
    179{
    180	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
    181
    182	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
    183
    184	return 0;
    185}
    186
    187static bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
    188{
    189	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
    190			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
    191	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
    192			CGS_IND_REG__SMC, ixSMC_PC_C)));
    193}
    194
    195static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
    196				uint32_t *value, uint32_t limit)
    197{
    198	int result;
    199
    200	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
    201
    202	if (result)
    203		return result;
    204
    205	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
    206	return 0;
    207}
    208
    209static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
    210{
    211	struct amdgpu_device *adev = hwmgr->adev;
    212	int ret;
    213
    214	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
    215	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
    216
    217	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
    218
    219	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
    220
    221	if (ret != 1)
    222		dev_info(adev->dev,
     223			"failed to send message %x, ret is %d\n", msg, ret);
    224
    225	return 0;
    226}
    227
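/*
 * The handshake above is: clear SMC_RESP_0, write the message id to
 * SMC_MESSAGE_0, spin until SMC_RESP becomes non-zero, then treat any
 * response other than 1 as a failure. Note the failure is only logged;
 * the function still returns 0. A call that carries an argument goes
 * through the helper below, e.g. (message id purely hypothetical):
 *
 *   ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Example, 1);
 */
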
    228static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
    229					uint16_t msg, uint32_t parameter)
    230{
    231	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
    232	return ci_send_msg_to_smc(hwmgr, msg);
    233}
    234
    235static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
    236{
    237	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    238	struct amdgpu_device *adev = hwmgr->adev;
    239	uint32_t dev_id;
    240
    241	dev_id = adev->pdev->device;
    242
    243	switch (dev_id) {
    244	case 0x67BA:
    245	case 0x67B1:
    246		smu_data->power_tune_defaults = &defaults_hawaii_pro;
    247		break;
    248	case 0x67B8:
    249	case 0x66B0:
    250		smu_data->power_tune_defaults = &defaults_hawaii_xt;
    251		break;
    252	case 0x6640:
    253	case 0x6641:
    254	case 0x6646:
    255	case 0x6647:
    256		smu_data->power_tune_defaults = &defaults_saturn_xt;
    257		break;
    258	case 0x6649:
    259	case 0x6650:
    260	case 0x6651:
    261	case 0x6658:
    262	case 0x665C:
    263	case 0x665D:
    264	case 0x67A0:
    265	case 0x67A1:
    266	case 0x67A2:
    267	case 0x67A8:
    268	case 0x67A9:
    269	case 0x67AA:
    270	case 0x67B9:
    271	case 0x67BE:
    272	default:
    273		smu_data->power_tune_defaults = &defaults_bonaire_xt;
    274		break;
    275	}
    276}
    277
    278static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
    279	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
    280	uint32_t clock, uint32_t *vol)
    281{
    282	uint32_t i = 0;
    283
    284	if (allowed_clock_voltage_table->count == 0)
    285		return -EINVAL;
    286
    287	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
    288		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
    289			*vol = allowed_clock_voltage_table->entries[i].v;
    290			return 0;
    291		}
    292	}
    293
    294	*vol = allowed_clock_voltage_table->entries[i - 1].v;
    295	return 0;
    296}
    297
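/*
 * Example: with a dependency table of {clk, v} entries
 * {{30000, 900}, {60000, 1000}, {80000, 1100}}, a request for
 * clock = 50000 yields *vol = 1000 (first entry whose clk >= clock),
 * while any clock above 80000 falls out of the loop and yields the
 * highest entry, *vol = 1100.
 */
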
    298static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
    299		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
    300{
    301	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    302	struct pp_atomctrl_clock_dividers_vi dividers;
    303	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
    304	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
    305	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
    306	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
    307	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
    308	uint32_t ref_clock;
    309	uint32_t ref_divider;
    310	uint32_t fbdiv;
    311	int result;
    312
    313	/* get the engine clock dividers for this clock value */
    314	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
    315
    316	PP_ASSERT_WITH_CODE(result == 0,
    317			"Error retrieving Engine Clock dividers from VBIOS.",
    318			return result);
    319
    320	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
    321	ref_clock = atomctrl_get_reference_clock(hwmgr);
    322	ref_divider = 1 + dividers.uc_pll_ref_div;
    323
     324	/* low 14 bits are the fraction, high 12 bits the divider */
    325	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
    326
    327	/* SPLL_FUNC_CNTL setup */
    328	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
    329			SPLL_REF_DIV, dividers.uc_pll_ref_div);
    330	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
    331			SPLL_PDIV_A,  dividers.uc_pll_post_div);
    332
    333	/* SPLL_FUNC_CNTL_3 setup*/
    334	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
    335			SPLL_FB_DIV, fbdiv);
    336
    337	/* set to use fractional accumulation*/
    338	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
    339			SPLL_DITHEN, 1);
    340
    341	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
    342				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
    343		struct pp_atomctrl_internal_ss_info ss_info;
    344		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
    345
    346		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
    347				vco_freq, &ss_info)) {
    348			uint32_t clk_s = ref_clock * 5 /
    349					(ref_divider * ss_info.speed_spectrum_rate);
    350			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
    351					fbdiv / (clk_s * 10000);
    352
    353			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
    354					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
    355			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
    356					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
    357			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
    358					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
    359		}
    360	}
    361
    362	sclk->SclkFrequency        = clock;
    363	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
    364	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
    365	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
    366	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
    367	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
    368
    369	return 0;
    370}
    371
    372static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
    373				const struct phm_phase_shedding_limits_table *pl,
    374					uint32_t sclk, uint32_t *p_shed)
    375{
    376	unsigned int i;
    377
    378	/* use the minimum phase shedding */
    379	*p_shed = 1;
    380
    381	for (i = 0; i < pl->count; i++) {
    382		if (sclk < pl->entries[i].Sclk) {
    383			*p_shed = i;
    384			break;
    385		}
    386	}
    387}
    388
    389static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
    390			uint32_t clock_insr)
    391{
    392	uint8_t i;
    393	uint32_t temp;
    394	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
    395
    396	if (clock < min) {
    397		pr_info("Engine clock can't satisfy stutter requirement!\n");
    398		return 0;
    399	}
    400	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
    401		temp = clock >> i;
    402
    403		if (temp >= min || i == 0)
    404			break;
    405	}
    406	return i;
    407}
    408
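/*
 * Example: with clock_insr = CISLAND_MINIMUM_ENGINE_CLOCK the floor is
 * min = 800, and the loop returns the largest divider id whose divided
 * clock still meets it: clock = 30000 gives 30000 >> 5 = 937 >= 800, so
 * id 5; clock = 3000 walks 93, 187, 375, 750 and stops at
 * 3000 >> 1 = 1500, so id 1.
 */
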
    409static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
    410		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
    411{
    412	int result;
    413	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    414
    415
     416	result = ci_calculate_sclk_params(hwmgr, clock, level);
	if (result)
		return result;
    417
    418	/* populate graphics levels */
    419	result = ci_get_dependency_volt_by_clk(hwmgr,
    420			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
    421			(uint32_t *)(&level->MinVddc));
    422	if (result) {
    423		pr_err("vdd_dep_on_sclk table is NULL\n");
    424		return result;
    425	}
    426
    427	level->SclkFrequency = clock;
    428	level->MinVddcPhases = 1;
    429
    430	if (data->vddc_phase_shed_control)
    431		ci_populate_phase_value_based_on_sclk(hwmgr,
    432				hwmgr->dyn_state.vddc_phase_shed_limits_table,
    433				clock,
    434				&level->MinVddcPhases);
    435
    436	level->ActivityLevel = data->current_profile_setting.sclk_activity;
    437	level->CcPwrDynRm = 0;
    438	level->CcPwrDynRm1 = 0;
    439	level->EnabledForActivity = 0;
    440	/* this level can be used for throttling.*/
    441	level->EnabledForThrottle = 1;
    442	level->UpH = data->current_profile_setting.sclk_up_hyst;
    443	level->DownH = data->current_profile_setting.sclk_down_hyst;
    444	level->VoltageDownH = 0;
    445	level->PowerThrottle = 0;
    446
    447
    448	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
    449			PHM_PlatformCaps_SclkDeepSleep))
    450		level->DeepSleepDivId =
    451				ci_get_sleep_divider_id_from_clock(clock,
    452						CISLAND_MINIMUM_ENGINE_CLOCK);
    453
     454	/* Default to the low watermark; the highest DPM level is set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
    455	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
    456
    457	if (0 == result) {
    458		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
    459		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
    460		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
    461		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
    462		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
    463		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
    464		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
    465		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
    466		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
    467		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
    468	}
    469
    470	return result;
    471}
    472
    473static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
    474{
    475	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    476	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    477	struct smu7_dpm_table *dpm_table = &data->dpm_table;
    478	int result = 0;
    479	uint32_t array = smu_data->dpm_table_start +
    480			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
    481	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
    482			SMU7_MAX_LEVELS_GRAPHICS;
    483	struct SMU7_Discrete_GraphicsLevel *levels =
    484			smu_data->smc_state_table.GraphicsLevel;
    485	uint32_t i;
    486
    487	for (i = 0; i < dpm_table->sclk_table.count; i++) {
    488		result = ci_populate_single_graphic_level(hwmgr,
    489				dpm_table->sclk_table.dpm_levels[i].value,
    490				&levels[i]);
    491		if (result)
    492			return result;
    493		if (i > 1)
    494			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
    495		if (i == (dpm_table->sclk_table.count - 1))
    496			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
    497				PPSMC_DISPLAY_WATERMARK_HIGH;
    498	}
    499
    500	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
    501
    502	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
    503	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
    504		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
    505
    506	result = ci_copy_bytes_to_smc(hwmgr, array,
    507				   (u8 *)levels, array_size,
    508				   SMC_RAM_END);
    509
    510	return result;
    511
    512}
    513
    514static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
    515{
    516	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    517	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    518
    519	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
    520	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
    521	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
    522	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
    523
    524	return 0;
    525}
    526
    527static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
    528{
    529	uint16_t tdc_limit;
    530	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    531	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    532
    533	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
    534	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
    535			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
    536	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
    537			defaults->tdc_vddc_throttle_release_limit_perc;
    538	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
    539
    540	return 0;
    541}
    542
    543static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
    544{
    545	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    546	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    547	uint32_t temp;
    548
    549	if (ci_read_smc_sram_dword(hwmgr,
    550			fuse_table_offset +
    551			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
    552			(uint32_t *)&temp, SMC_RAM_END))
    553		PP_ASSERT_WITH_CODE(false,
     554				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC failed!",
    555				return -EINVAL);
    556	else
    557		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
    558
    559	return 0;
    560}
    561
    562static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
    563{
    564	uint16_t tmp;
    565	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    566
    567	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
    568		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
    569		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
    570	else
    571		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
    572
    573	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
    574
    575	return 0;
    576}
    577
    578static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
    579{
    580	int i;
    581	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    582	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
    583	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
    584	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
    585
    586	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
    587			    "The CAC Leakage table does not exist!", return -EINVAL);
    588	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
    589			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
    590	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
     591			    "CACLeakageTable->count and VddcDependencyOnSCLK->count not equal", return -EINVAL);
    592
    593	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
    594		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
    595			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
    596			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
    597			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
    598		} else {
    599			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
    600			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
    601		}
    602	}
    603
    604	return 0;
    605}
    606
    607static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
    608{
    609	int i;
    610	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    611	uint8_t *vid = smu_data->power_tune_table.VddCVid;
    612	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    613
    614	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
    615		"There should never be more than 8 entries for VddcVid!!!",
    616		return -EINVAL);
    617
    618	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
    619		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
    620
    621	return 0;
    622}
    623
    624static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
    625{
    626	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    627	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
    628	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
    629	int i, min, max;
    630
    631	min = max = hi_vid[0];
    632	for (i = 0; i < 8; i++) {
    633		if (0 != hi_vid[i]) {
    634			if (min > hi_vid[i])
    635				min = hi_vid[i];
    636			if (max < hi_vid[i])
    637				max = hi_vid[i];
    638		}
    639
    640		if (0 != lo_vid[i]) {
    641			if (min > lo_vid[i])
    642				min = lo_vid[i];
    643			if (max < lo_vid[i])
    644				max = lo_vid[i];
    645		}
    646	}
    647
    648	if ((min == 0) || (max == 0))
    649		return -EINVAL;
    650	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
    651	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
    652
    653	return 0;
    654}
    655
    656static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
    657{
    658	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    659	uint16_t HiSidd;
    660	uint16_t LoSidd;
    661	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
    662
    663	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
    664	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
    665
    666	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
    667			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
    668	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
    669			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
    670
    671	return 0;
    672}
    673
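/*
 * Note the evaluation order above: the division happens first, so
 * usHighCACLeakage / 100 * 256 is an integer 8.8 fixed-point conversion,
 * e.g. 300 / 100 * 256 = 0x300 while 99 / 100 * 256 = 0; any sub-1/100
 * precision is truncated away.
 */
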
    674static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
    675{
    676	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    677	uint32_t pm_fuse_table_offset;
    678	int ret = 0;
    679
    680	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
    681			PHM_PlatformCaps_PowerContainment)) {
    682		if (ci_read_smc_sram_dword(hwmgr,
    683				SMU7_FIRMWARE_HEADER_LOCATION +
    684				offsetof(SMU7_Firmware_Header, PmFuseTable),
    685				&pm_fuse_table_offset, SMC_RAM_END)) {
    686			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
    687			return -EINVAL;
    688		}
    689
    690		/* DW0 - DW3 */
    691		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
    692		/* DW4 - DW5 */
    693		ret |= ci_populate_vddc_vid(hwmgr);
    694		/* DW6 */
    695		ret |= ci_populate_svi_load_line(hwmgr);
    696		/* DW7 */
    697		ret |= ci_populate_tdc_limit(hwmgr);
    698		/* DW8 */
    699		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
    700
    701		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
    702
    703		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
    704
    705		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
    706		if (ret)
    707			return ret;
    708
    709		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
    710				(uint8_t *)&smu_data->power_tune_table,
    711				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
    712	}
    713	return ret;
    714}
    715
    716static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
    717{
    718	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    719	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    720	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    721	SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
    722	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
    723	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
    724	const uint16_t *def1, *def2;
    725	int i, j, k;
    726
    727	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
    728	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
    729
    730	dpm_table->DTETjOffset = 0;
    731	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
    732	dpm_table->GpuTjHyst = 8;
    733
    734	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
    735
    736	if (ppm) {
    737		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
    738		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
    739	} else {
    740		dpm_table->PPM_PkgPwrLimit = 0;
    741		dpm_table->PPM_TemperatureLimit = 0;
    742	}
    743
    744	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
    745	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
    746
    747	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
    748	def1 = defaults->bapmti_r;
    749	def2 = defaults->bapmti_rc;
    750
    751	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
    752		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
    753			for (k = 0; k < SMU7_DTE_SINKS; k++) {
    754				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
    755				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
    756				def1++;
    757				def2++;
    758			}
    759		}
    760	}
    761
    762	return 0;
    763}
    764
    765static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
    766		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
    767		uint16_t *lo)
    768{
    769	uint16_t v_index;
    770	bool vol_found = false;
    771	*hi = tab->value * VOLTAGE_SCALE;
    772	*lo = tab->value * VOLTAGE_SCALE;
    773
    774	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
    775			"The SCLK/VDDC Dependency Table does not exist.\n",
    776			return -EINVAL);
    777
    778	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
    779		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
    780		return 0;
    781	}
    782
    783	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
    784		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
    785			vol_found = true;
    786			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
    787				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
    788				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
    789			} else {
    790				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
    791				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
    792				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
    793			}
    794			break;
    795		}
    796	}
    797
    798	if (!vol_found) {
    799		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
    800			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
    801				vol_found = true;
    802				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
    803					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
    804					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
    805				} else {
     806					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second lookup, using maximum index from CAC table.\n");
    807					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
    808					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
    809				}
    810				break;
    811			}
    812		}
    813
    814		if (!vol_found)
    815			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
    816	}
    817
    818	return 0;
    819}
    820
    821static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
    822		pp_atomctrl_voltage_table_entry *tab,
    823		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
    824{
    825	int result;
    826
    827	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
    828			&smc_voltage_tab->StdVoltageHiSidd,
    829			&smc_voltage_tab->StdVoltageLoSidd);
    830	if (result) {
    831		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
    832		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
    833	}
    834
    835	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
    836	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
    837	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
    838
    839	return 0;
    840}
    841
    842static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
    843			SMU7_Discrete_DpmTable *table)
    844{
    845	unsigned int count;
    846	int result;
    847	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    848
    849	table->VddcLevelCount = data->vddc_voltage_table.count;
    850	for (count = 0; count < table->VddcLevelCount; count++) {
    851		result = ci_populate_smc_voltage_table(hwmgr,
    852				&(data->vddc_voltage_table.entries[count]),
    853				&(table->VddcLevel[count]));
     854		PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);
    855
    856		/* GPIO voltage control */
    857		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
    858			table->VddcLevel[count].Smio = (uint8_t) count;
    859			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
    860			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
    861		} else {
    862			table->VddcLevel[count].Smio = 0;
    863		}
    864	}
    865
    866	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
    867
    868	return 0;
    869}
    870
    871static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
    872			SMU7_Discrete_DpmTable *table)
    873{
    874	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    875	uint32_t count;
    876	int result;
    877
    878	table->VddciLevelCount = data->vddci_voltage_table.count;
    879
    880	for (count = 0; count < table->VddciLevelCount; count++) {
    881		result = ci_populate_smc_voltage_table(hwmgr,
    882				&(data->vddci_voltage_table.entries[count]),
    883				&(table->VddciLevel[count]));
     884		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
    885		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
    886			table->VddciLevel[count].Smio = (uint8_t) count;
    887			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
    888			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
    889		} else {
    890			table->VddciLevel[count].Smio = 0;
    891		}
    892	}
    893
    894	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
    895
    896	return 0;
    897}
    898
    899static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
    900			SMU7_Discrete_DpmTable *table)
    901{
    902	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    903	uint32_t count;
    904	int result;
    905
    906	table->MvddLevelCount = data->mvdd_voltage_table.count;
    907
    908	for (count = 0; count < table->MvddLevelCount; count++) {
    909		result = ci_populate_smc_voltage_table(hwmgr,
    910				&(data->mvdd_voltage_table.entries[count]),
    911				&table->MvddLevel[count]);
     912		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC MVDD voltage table", return -EINVAL);
    913		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
    914			table->MvddLevel[count].Smio = (uint8_t) count;
    915			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
    916			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
    917		} else {
    918			table->MvddLevel[count].Smio = 0;
    919		}
    920	}
    921
    922	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
    923
    924	return 0;
    925}
    926
    927
    928static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
    929	SMU7_Discrete_DpmTable *table)
    930{
    931	int result;
    932
    933	result = ci_populate_smc_vddc_table(hwmgr, table);
    934	PP_ASSERT_WITH_CODE(0 == result,
     935			"cannot populate VDDC voltage table to SMC", return -EINVAL);
    936
    937	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
    938	PP_ASSERT_WITH_CODE(0 == result,
     939			"cannot populate VDDCI voltage table to SMC", return -EINVAL);
    940
    941	result = ci_populate_smc_mvdd_table(hwmgr, table);
    942	PP_ASSERT_WITH_CODE(0 == result,
     943			"cannot populate MVDD voltage table to SMC", return -EINVAL);
    944
    945	return 0;
    946}
    947
    948static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
    949		struct SMU7_Discrete_Ulv *state)
    950{
    951	uint32_t voltage_response_time, ulv_voltage;
    952	int result;
    953	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    954
    955	state->CcPwrDynRm = 0;
    956	state->CcPwrDynRm1 = 0;
    957
    958	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
     959	PP_ASSERT_WITH_CODE((0 == result), "cannot get ULV voltage value", return result);
    960
    961	if (ulv_voltage == 0) {
    962		data->ulv_supported = false;
    963		return 0;
    964	}
    965
    966	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
    967		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
    968		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
    969			state->VddcOffset = 0;
    970		else
     971			/* Used in SMIO mode; not implemented for now. This is a backup path for CI. */
    972			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
    973	} else {
    974		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
    975		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
    976			state->VddcOffsetVid = 0;
    977		else  /* used in SVI2 Mode */
    978			state->VddcOffsetVid = (uint8_t)(
    979					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
    980						* VOLTAGE_VID_OFFSET_SCALE2
    981						/ VOLTAGE_VID_OFFSET_SCALE1);
    982	}
    983	state->VddcPhase = 1;
    984
    985	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
    986	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
    987	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
    988
    989	return 0;
    990}
    991
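/*
 * Sketch of the SVI2 branch above, assuming the usual powerplay scale
 * factors VOLTAGE_VID_OFFSET_SCALE1 = 625 and VOLTAGE_VID_OFFSET_SCALE2 =
 * 100 (both defined outside this file): a ULV offset of
 * entries[0].v - ulv_voltage = 50 (mV) becomes 50 * 100 / 625 = 8 VID
 * steps, i.e. one step per 6.25 mV.
 */
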
    992static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
    993		 SMU7_Discrete_Ulv *ulv_level)
    994{
    995	return ci_populate_ulv_level(hwmgr, ulv_level);
    996}
    997
    998static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
    999{
   1000	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1001	struct smu7_dpm_table *dpm_table = &data->dpm_table;
   1002	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1003	uint32_t i;
   1004
    1005	/* Index dpm_table->pcie_speed_table.count is reserved for the PCIe boot level. */
   1006	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
   1007		table->LinkLevel[i].PcieGenSpeed  =
   1008			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
   1009		table->LinkLevel[i].PcieLaneCount =
   1010			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
   1011		table->LinkLevel[i].EnabledForActivity = 1;
   1012		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
   1013		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
   1014	}
   1015
   1016	smu_data->smc_state_table.LinkLevelCount =
   1017		(uint8_t)dpm_table->pcie_speed_table.count;
   1018	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
   1019		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
   1020
   1021	return 0;
   1022}
   1023
   1024static int ci_calculate_mclk_params(
   1025		struct pp_hwmgr *hwmgr,
   1026		uint32_t memory_clock,
   1027		SMU7_Discrete_MemoryLevel *mclk,
   1028		bool strobe_mode,
   1029		bool dllStateOn
   1030		)
   1031{
   1032	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1033	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
   1034	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
   1035	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
   1036	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
   1037	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
   1038	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
   1039	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
   1040	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
   1041	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
   1042
   1043	pp_atomctrl_memory_clock_param mpll_param;
   1044	int result;
   1045
   1046	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
   1047				memory_clock, &mpll_param, strobe_mode);
   1048	PP_ASSERT_WITH_CODE(0 == result,
   1049		"Error retrieving Memory Clock Parameters from VBIOS.", return result);
   1050
   1051	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
   1052
   1053	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
   1054							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
   1055	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
   1056							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
   1057	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
   1058							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
   1059
   1060	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
   1061							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
   1062
   1063	if (data->is_memory_gddr5) {
   1064		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
   1065								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
   1066		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
   1067								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
   1068	}
   1069
   1070	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1071			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
   1072		pp_atomctrl_internal_ss_info ss_info;
   1073		uint32_t freq_nom;
   1074		uint32_t tmp;
   1075		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
   1076
   1077		/* for GDDR5 for all modes and DDR3 */
   1078		if (1 == mpll_param.qdr)
   1079			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
   1080		else
   1081			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
   1082
    1083		/* tmp = (freq_nom / (reference_clock * reference_divider))^2; on SI, reference_divider = 1 */
   1084		tmp = (freq_nom / reference_clock);
   1085		tmp = tmp * tmp;
   1086
   1087		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
   1088			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
   1089			uint32_t clkv =
   1090				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
   1091							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
   1092
   1093			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
   1094			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
   1095		}
   1096	}
   1097
   1098	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1099		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
   1100	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1101		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
   1102	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1103		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
   1104
   1105
   1106	mclk->MclkFrequency   = memory_clock;
   1107	mclk->MpllFuncCntl    = mpll_func_cntl;
   1108	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
   1109	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
   1110	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
   1111	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
   1112	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
   1113	mclk->DllCntl         = dll_cntl;
   1114	mclk->MpllSs1         = mpll_ss1;
   1115	mclk->MpllSs2         = mpll_ss2;
   1116
   1117	return 0;
   1118}
   1119
   1120static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
   1121		bool strobe_mode)
   1122{
   1123	uint8_t mc_para_index;
   1124
   1125	if (strobe_mode) {
   1126		if (memory_clock < 12500)
   1127			mc_para_index = 0x00;
   1128		else if (memory_clock > 47500)
   1129			mc_para_index = 0x0f;
   1130		else
   1131			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
   1132	} else {
   1133		if (memory_clock < 65000)
   1134			mc_para_index = 0x00;
   1135		else if (memory_clock > 135000)
   1136			mc_para_index = 0x0f;
   1137		else
   1138			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
   1139	}
   1140
   1141	return mc_para_index;
   1142}
   1143
   1144static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
   1145{
   1146	uint8_t mc_para_index;
   1147
   1148	if (memory_clock < 10000)
   1149		mc_para_index = 0;
   1150	else if (memory_clock >= 80000)
   1151		mc_para_index = 0x0f;
   1152	else
   1153		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
   1154
   1155	return mc_para_index;
   1156}
   1157
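/*
 * Example: both ratio helpers map a memory clock (in 10 kHz units) onto a
 * 4-bit MC_SEQ table index. For 250 MHz = 25000,
 * ci_get_mclk_frequency_ratio() in strobe mode gives
 * (25000 - 10000) / 2500 = 6, and ci_get_ddr3_mclk_frequency_ratio()
 * gives (25000 - 10000) / 5000 + 1 = 4.
 */
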
   1158static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
   1159					uint32_t memory_clock, uint32_t *p_shed)
   1160{
   1161	unsigned int i;
   1162
   1163	*p_shed = 1;
   1164
   1165	for (i = 0; i < pl->count; i++) {
   1166		if (memory_clock < pl->entries[i].Mclk) {
   1167			*p_shed = i;
   1168			break;
   1169		}
   1170	}
   1171
   1172	return 0;
   1173}
   1174
   1175static int ci_populate_single_memory_level(
   1176		struct pp_hwmgr *hwmgr,
   1177		uint32_t memory_clock,
   1178		SMU7_Discrete_MemoryLevel *memory_level
   1179		)
   1180{
   1181	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1182	int result = 0;
   1183	bool dll_state_on;
   1184	uint32_t mclk_edc_wr_enable_threshold = 40000;
   1185	uint32_t mclk_edc_enable_threshold = 40000;
   1186	uint32_t mclk_strobe_mode_threshold = 40000;
   1187
   1188	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
   1189		result = ci_get_dependency_volt_by_clk(hwmgr,
   1190			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
   1191		PP_ASSERT_WITH_CODE((0 == result),
    1192			"cannot find MinVddc voltage value from memory VDDC voltage dependency table", return result);
   1193	}
   1194
   1195	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
   1196		result = ci_get_dependency_volt_by_clk(hwmgr,
   1197				hwmgr->dyn_state.vddci_dependency_on_mclk,
   1198				memory_clock,
   1199				&memory_level->MinVddci);
   1200		PP_ASSERT_WITH_CODE((0 == result),
    1201			"cannot find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
   1202	}
   1203
   1204	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
   1205		result = ci_get_dependency_volt_by_clk(hwmgr,
   1206				hwmgr->dyn_state.mvdd_dependency_on_mclk,
   1207				memory_clock,
   1208				&memory_level->MinMvdd);
   1209		PP_ASSERT_WITH_CODE((0 == result),
    1210			"cannot find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
   1211	}
   1212
   1213	memory_level->MinVddcPhases = 1;
   1214
   1215	if (data->vddc_phase_shed_control) {
   1216		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
   1217				memory_clock, &memory_level->MinVddcPhases);
   1218	}
   1219
   1220	memory_level->EnabledForThrottle = 1;
   1221	memory_level->EnabledForActivity = 1;
   1222	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
   1223	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
   1224	memory_level->VoltageDownH = 0;
   1225
   1226	/* Indicates maximum activity level for this performance level.*/
   1227	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
   1228	memory_level->StutterEnable = 0;
   1229	memory_level->StrobeEnable = 0;
   1230	memory_level->EdcReadEnable = 0;
   1231	memory_level->EdcWriteEnable = 0;
   1232	memory_level->RttEnable = 0;
   1233
   1234	/* default set to low watermark. Highest level will be set to high later.*/
   1235	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
   1236
   1237	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
   1238	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
   1239
    1240	/* stutter mode not supported on CI */
   1241
   1242	/* decide strobe mode*/
   1243	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
   1244		(memory_clock <= mclk_strobe_mode_threshold);
   1245
   1246	/* decide EDC mode and memory clock ratio*/
   1247	if (data->is_memory_gddr5) {
   1248		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
   1249					memory_level->StrobeEnable);
   1250
   1251		if ((mclk_edc_enable_threshold != 0) &&
   1252				(memory_clock > mclk_edc_enable_threshold)) {
   1253			memory_level->EdcReadEnable = 1;
   1254		}
   1255
   1256		if ((mclk_edc_wr_enable_threshold != 0) &&
   1257				(memory_clock > mclk_edc_wr_enable_threshold)) {
   1258			memory_level->EdcWriteEnable = 1;
   1259		}
   1260
   1261		if (memory_level->StrobeEnable) {
   1262			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
   1263					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
   1264				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
   1265			else
   1266				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
   1267		} else
   1268			dll_state_on = data->dll_default_on;
   1269	} else {
   1270		memory_level->StrobeRatio =
   1271			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
   1272		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
   1273	}
   1274
   1275	result = ci_calculate_mclk_params(hwmgr,
   1276		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
   1277
   1278	if (0 == result) {
   1279		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
   1280		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
   1281		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
   1282		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
   1283		/* MCLK frequency in units of 10KHz*/
   1284		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
   1285		/* Indicates maximum activity level for this performance level.*/
   1286		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
   1287		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
   1288		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
   1289		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
   1290		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
   1291		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
   1292		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
   1293		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
   1294		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
   1295		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
   1296	}
   1297
   1298	return result;
   1299}
   1300
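/*
 * Note: with all three thresholds fixed at 40000 (400 MHz in 10 kHz
 * units), the level above runs GDDR5 in strobe mode at or below 400 MHz
 * and enables EDC read/write above it; the DLL state is then taken from
 * the MC_SEQ_MISC5/6/7 fuses for the chosen strobe ratio.
 */
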
   1301static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
   1302{
   1303	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1304	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1305	struct smu7_dpm_table *dpm_table = &data->dpm_table;
   1306	int result;
   1307	struct amdgpu_device *adev = hwmgr->adev;
   1308	uint32_t dev_id;
   1309
   1310	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
   1311	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
   1312	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
   1313	uint32_t i;
   1314
   1315	memset(levels, 0x00, level_array_size);
   1316
   1317	for (i = 0; i < dpm_table->mclk_table.count; i++) {
   1318		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
    1319			"cannot populate memory level as memory clock is zero", return -EINVAL);
   1320		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
   1321			&(smu_data->smc_state_table.MemoryLevel[i]));
   1322		if (0 != result)
   1323			return result;
   1324	}
   1325
   1326	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
   1327
   1328	dev_id = adev->pdev->device;
   1329
   1330	if ((dpm_table->mclk_table.count >= 2)
   1331		&& ((dev_id == 0x67B0) ||  (dev_id == 0x67B1))) {
   1332		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
   1333				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
   1334		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
   1335				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
   1336	}
   1337	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
   1338	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
   1339
   1340	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
   1341	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
   1342	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
   1343
   1344	result = ci_copy_bytes_to_smc(hwmgr,
   1345		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
   1346		SMC_RAM_END);
   1347
   1348	return result;
   1349}
   1350
   1351static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
   1352					SMU7_Discrete_VoltageLevel *voltage)
   1353{
   1354	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1355
   1356	uint32_t i = 0;
   1357
   1358	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
    1359		/* find the first MVDD entry whose clock is at least the requested clock */
   1360		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
   1361			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
   1362				/* Always round to higher voltage. */
   1363				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
   1364				break;
   1365			}
   1366		}
   1367
   1368		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
   1369			"MVDD Voltage is outside the supported range.", return -EINVAL);
   1370
   1371	} else {
   1372		return -EINVAL;
   1373	}
   1374
   1375	return 0;
   1376}
   1377
   1378static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
   1379	SMU7_Discrete_DpmTable *table)
   1380{
   1381	int result = 0;
   1382	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1383	struct pp_atomctrl_clock_dividers_vi dividers;
   1384
   1385	SMU7_Discrete_VoltageLevel voltage_level;
   1386	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
   1387	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
   1388	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
   1389	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
   1390
   1391
   1392	/* The ACPI state should not do DPM on DC (or ever).*/
   1393	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
   1394
   1395	if (data->acpi_vddc)
   1396		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
   1397	else
   1398		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
   1399
   1400	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
    1401	/* run the ACPI level at the reference clock */
   1402	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
   1403
   1404	/* get the engine clock dividers for this clock value*/
   1405	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
   1406		table->ACPILevel.SclkFrequency,  &dividers);
   1407
   1408	PP_ASSERT_WITH_CODE(result == 0,
   1409		"Error retrieving Engine Clock dividers from VBIOS.", return result);
   1410
   1411	/* divider ID for required SCLK*/
   1412	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
   1413	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
   1414	table->ACPILevel.DeepSleepDivId = 0;
   1415
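        	/* While the ACPI state is active: power the SPLL down, hold it
        	 * in reset, and move the SCLK mux off the PLL output (select 4,
        	 * presumably a bypass source).
        	 */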
   1416	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
   1417							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
   1418	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
   1419							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
   1420	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
   1421							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
   1422
   1423	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
   1424	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
   1425	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
   1426	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
   1427	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
   1428	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
   1429	table->ACPILevel.CcPwrDynRm = 0;
   1430	table->ACPILevel.CcPwrDynRm1 = 0;
   1431
   1432	/* For various features to be enabled/disabled while this level is active.*/
   1433	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
   1434	/* SCLK frequency in units of 10KHz*/
   1435	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
   1436	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
   1437	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
   1438	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
   1439	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
   1440	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
   1441	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
   1442	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
   1443	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
   1444
   1445
   1446	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
   1447	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
   1448	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
   1449
   1450	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
   1451		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
   1452	else {
   1453		if (data->acpi_vddci != 0)
   1454			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
   1455		else
   1456			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
   1457	}
   1458
   1459	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
   1460		table->MemoryACPILevel.MinMvdd =
   1461			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
   1462	else
   1463		table->MemoryACPILevel.MinMvdd = 0;
   1464
   1465	/* Force reset on DLL*/
   1466	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1467		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
   1468	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1469		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
   1470
   1471	/* Disable DLL in ACPIState*/
   1472	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1473		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
   1474	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
   1475		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
   1476
   1477	/* Enable DLL bypass signal*/
   1478	dll_cntl            = PHM_SET_FIELD(dll_cntl,
   1479		DLL_CNTL, MRDCK0_BYPASS, 0);
   1480	dll_cntl            = PHM_SET_FIELD(dll_cntl,
   1481		DLL_CNTL, MRDCK1_BYPASS, 0);
   1482
   1483	table->MemoryACPILevel.DllCntl            =
   1484		PP_HOST_TO_SMC_UL(dll_cntl);
   1485	table->MemoryACPILevel.MclkPwrmgtCntl     =
   1486		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
   1487	table->MemoryACPILevel.MpllAdFuncCntl     =
   1488		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
   1489	table->MemoryACPILevel.MpllDqFuncCntl     =
   1490		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
   1491	table->MemoryACPILevel.MpllFuncCntl       =
   1492		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
   1493	table->MemoryACPILevel.MpllFuncCntl_1     =
   1494		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
   1495	table->MemoryACPILevel.MpllFuncCntl_2     =
   1496		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
   1497	table->MemoryACPILevel.MpllSs1            =
   1498		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
   1499	table->MemoryACPILevel.MpllSs2            =
   1500		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
   1501
   1502	table->MemoryACPILevel.EnabledForThrottle = 0;
   1503	table->MemoryACPILevel.EnabledForActivity = 0;
   1504	table->MemoryACPILevel.UpH = 0;
   1505	table->MemoryACPILevel.DownH = 100;
   1506	table->MemoryACPILevel.VoltageDownH = 0;
    1507	/* Indicates maximum activity level for this performance level. */
   1508	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
   1509
   1510	table->MemoryACPILevel.StutterEnable = 0;
   1511	table->MemoryACPILevel.StrobeEnable = 0;
   1512	table->MemoryACPILevel.EdcReadEnable = 0;
   1513	table->MemoryACPILevel.EdcWriteEnable = 0;
   1514	table->MemoryACPILevel.RttEnable = 0;
   1515
   1516	return result;
   1517}
   1518
   1519static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
   1520					SMU7_Discrete_DpmTable *table)
   1521{
   1522	int result = 0;
   1523	uint8_t count;
   1524	struct pp_atomctrl_clock_dividers_vi dividers;
   1525	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
   1526		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
   1527
   1528	table->UvdLevelCount = (uint8_t)(uvd_table->count);
   1529
   1530	for (count = 0; count < table->UvdLevelCount; count++) {
   1531		table->UvdLevel[count].VclkFrequency =
   1532					uvd_table->entries[count].vclk;
   1533		table->UvdLevel[count].DclkFrequency =
   1534					uvd_table->entries[count].dclk;
   1535		table->UvdLevel[count].MinVddc =
   1536					uvd_table->entries[count].v * VOLTAGE_SCALE;
   1537		table->UvdLevel[count].MinVddcPhases = 1;
   1538
   1539		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
   1540				table->UvdLevel[count].VclkFrequency, &dividers);
   1541		PP_ASSERT_WITH_CODE((0 == result),
    1542				"cannot find divider ID for Vclk", return result);
   1543
   1544		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
   1545
   1546		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
   1547				table->UvdLevel[count].DclkFrequency, &dividers);
   1548		PP_ASSERT_WITH_CODE((0 == result),
    1549				"cannot find divider ID for Dclk", return result);
   1550
   1551		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
   1552		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
   1553		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
   1554		CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
   1555	}
   1556
   1557	return result;
   1558}
   1559
   1560static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
   1561		SMU7_Discrete_DpmTable *table)
   1562{
   1563	int result = -EINVAL;
   1564	uint8_t count;
   1565	struct pp_atomctrl_clock_dividers_vi dividers;
   1566	struct phm_vce_clock_voltage_dependency_table *vce_table =
   1567				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
   1568
   1569	table->VceLevelCount = (uint8_t)(vce_table->count);
   1570	table->VceBootLevel = 0;
   1571
   1572	for (count = 0; count < table->VceLevelCount; count++) {
   1573		table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
   1574		table->VceLevel[count].MinVoltage =
   1575				vce_table->entries[count].v * VOLTAGE_SCALE;
   1576		table->VceLevel[count].MinPhases = 1;
   1577
   1578		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
   1579				table->VceLevel[count].Frequency, &dividers);
   1580		PP_ASSERT_WITH_CODE((0 == result),
    1581				"cannot find divider ID for the VCE engine clock",
   1582				return result);
   1583
   1584		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
   1585
   1586		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
   1587		CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
   1588	}
   1589	return result;
   1590}
   1591
   1592static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
   1593					SMU7_Discrete_DpmTable *table)
   1594{
   1595	int result = -EINVAL;
   1596	uint8_t count;
   1597	struct pp_atomctrl_clock_dividers_vi dividers;
   1598	struct phm_acp_clock_voltage_dependency_table *acp_table =
   1599				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
   1600
   1601	table->AcpLevelCount = (uint8_t)(acp_table->count);
   1602	table->AcpBootLevel = 0;
   1603
   1604	for (count = 0; count < table->AcpLevelCount; count++) {
   1605		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
   1606		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
   1607		table->AcpLevel[count].MinPhases = 1;
   1608
   1609		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
   1610				table->AcpLevel[count].Frequency, &dividers);
   1611		PP_ASSERT_WITH_CODE((0 == result),
    1612				"cannot find divider ID for the ACP engine clock", return result);
   1613
   1614		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
   1615
   1616		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
   1617		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
   1618	}
   1619	return result;
   1620}
   1621
   1622static int ci_populate_memory_timing_parameters(
   1623		struct pp_hwmgr *hwmgr,
   1624		uint32_t engine_clock,
   1625		uint32_t memory_clock,
   1626		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
   1627		)
   1628{
   1629	uint32_t dramTiming;
   1630	uint32_t dramTiming2;
   1631	uint32_t burstTime;
   1632	int result;
   1633
   1634	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
   1635				engine_clock, memory_clock);
   1636
   1637	PP_ASSERT_WITH_CODE(result == 0,
   1638		"Error calling VBIOS to set DRAM_TIMING.", return result);
   1639
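        	/* The VBIOS call above retuned the MC arbiter for this
        	 * sclk/mclk pair; read the resulting timings back so they can
        	 * be stored in the SMC ARB table.
        	 */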
   1640	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
   1641	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
   1642	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
   1643
   1644	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
   1645	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
   1646	arb_regs->McArbBurstTime = (uint8_t)burstTime;
   1647
   1648	return 0;
   1649}
   1650
   1651static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
   1652{
   1653	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1654	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1655	int result = 0;
   1656	SMU7_Discrete_MCArbDramTimingTable  arb_regs;
   1657	uint32_t i, j;
   1658
   1659	memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
   1660
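        	/* Build one ARB register set per (sclk, mclk) combination so
        	 * the SMC can switch DRAM timings whenever either DPM level
        	 * changes.
        	 */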
   1661	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
   1662		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
   1663			result = ci_populate_memory_timing_parameters
   1664				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
   1665				 data->dpm_table.mclk_table.dpm_levels[j].value,
   1666				 &arb_regs.entries[i][j]);
   1667
   1668			if (0 != result)
   1669				break;
   1670		}
   1671	}
   1672
   1673	if (0 == result) {
   1674		result = ci_copy_bytes_to_smc(
   1675				hwmgr,
   1676				smu_data->arb_table_start,
   1677				(uint8_t *)&arb_regs,
   1678				sizeof(SMU7_Discrete_MCArbDramTimingTable),
   1679				SMC_RAM_END
   1680				);
   1681	}
   1682
   1683	return result;
   1684}
   1685
   1686static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
   1687			SMU7_Discrete_DpmTable *table)
   1688{
   1689	int result = 0;
   1690	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1691	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1692
   1693	table->GraphicsBootLevel = 0;
   1694	table->MemoryBootLevel = 0;
   1695
   1696	/* find boot level from dpm table*/
   1697	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
   1698			data->vbios_boot_state.sclk_bootup_value,
   1699			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
   1700
   1701	if (0 != result) {
   1702		smu_data->smc_state_table.GraphicsBootLevel = 0;
    1703		pr_err("VBIOS boot engine clock was not found in the dependency table. Using Graphics DPM level 0!\n");
   1704		result = 0;
   1705	}
   1706
   1707	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
   1708		data->vbios_boot_state.mclk_bootup_value,
   1709		(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
   1710
   1711	if (0 != result) {
   1712		smu_data->smc_state_table.MemoryBootLevel = 0;
    1713		pr_err("VBIOS boot memory clock was not found in the dependency table. Using Memory DPM level 0!\n");
   1714		result = 0;
   1715	}
   1716
   1717	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
   1718	table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
   1719	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
   1720
   1721	return result;
   1722}
   1723
   1724static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
   1725				 SMU7_Discrete_MCRegisters *mc_reg_table)
   1726{
   1727	const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
   1728
   1729	uint32_t i, j;
   1730
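        	/* Copy only the registers flagged in validflag, compacting them
        	 * to the front of the SMC-side address array.
        	 */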
   1731	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
   1732		if (smu_data->mc_reg_table.validflag & 1<<j) {
   1733			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
    1734				"Index into mc_reg_table->address[] is out of bounds", return -EINVAL);
   1735			mc_reg_table->address[i].s0 =
   1736				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
   1737			mc_reg_table->address[i].s1 =
   1738				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
   1739			i++;
   1740		}
   1741	}
   1742
   1743	mc_reg_table->last = (uint8_t)i;
   1744
   1745	return 0;
   1746}
   1747
   1748static void ci_convert_mc_registers(
   1749	const struct ci_mc_reg_entry *entry,
   1750	SMU7_Discrete_MCRegisterSet *data,
   1751	uint32_t num_entries, uint32_t valid_flag)
   1752{
   1753	uint32_t i, j;
   1754
   1755	for (i = 0, j = 0; j < num_entries; j++) {
   1756		if (valid_flag & 1<<j) {
   1757			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
   1758			i++;
   1759		}
   1760	}
   1761}
   1762
   1763static int ci_convert_mc_reg_table_entry_to_smc(
   1764		struct pp_hwmgr *hwmgr,
   1765		const uint32_t memory_clock,
   1766		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
   1767		)
   1768{
   1769	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1770	uint32_t i = 0;
   1771
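        	/* Pick the first AC timing entry whose mclk_max covers the
        	 * requested memory clock; fall back to the highest entry if
        	 * none does.
        	 */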
   1772	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
   1773		if (memory_clock <=
   1774			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
   1775			break;
   1776		}
   1777	}
   1778
   1779	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
   1780		--i;
   1781
   1782	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
   1783				mc_reg_table_data, smu_data->mc_reg_table.last,
   1784				smu_data->mc_reg_table.validflag);
   1785
   1786	return 0;
   1787}
   1788
   1789static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
   1790		SMU7_Discrete_MCRegisters *mc_regs)
   1791{
   1792	int result = 0;
   1793	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1794	int res;
   1795	uint32_t i;
   1796
   1797	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
   1798		res = ci_convert_mc_reg_table_entry_to_smc(
   1799				hwmgr,
   1800				data->dpm_table.mclk_table.dpm_levels[i].value,
   1801				&mc_regs->data[i]
   1802				);
   1803
   1804		if (0 != res)
   1805			result = res;
   1806	}
   1807
   1808	return result;
   1809}
   1810
   1811static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
   1812{
   1813	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1814	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1815	uint32_t address;
   1816	int32_t result;
   1817
   1818	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
   1819		return 0;
   1820
   1821
   1822	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
   1823
   1824	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
   1825
   1826	if (result != 0)
   1827		return result;
   1828
   1829	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
   1830
   1831	return  ci_copy_bytes_to_smc(hwmgr, address,
   1832				 (uint8_t *)&smu_data->mc_regs.data[0],
   1833				sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
   1834				SMC_RAM_END);
   1835}
   1836
   1837static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
   1838{
   1839	int result;
   1840	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1841
   1842	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
   1843	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
   1844	PP_ASSERT_WITH_CODE(0 == result,
   1845		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
   1846
   1847	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
   1848	PP_ASSERT_WITH_CODE(0 == result,
   1849		"Failed to initialize MCRegTable for driver state!", return result;);
   1850
   1851	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
   1852			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
   1853}
   1854
   1855static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
   1856{
   1857	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1858	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1859	uint8_t count, level;
   1860
   1861	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
   1862
   1863	for (level = 0; level < count; level++) {
   1864		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
   1865			 >= data->vbios_boot_state.sclk_bootup_value) {
   1866			smu_data->smc_state_table.GraphicsBootLevel = level;
   1867			break;
   1868		}
   1869	}
   1870
   1871	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
   1872
   1873	for (level = 0; level < count; level++) {
   1874		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
   1875			>= data->vbios_boot_state.mclk_bootup_value) {
   1876			smu_data->smc_state_table.MemoryBootLevel = level;
   1877			break;
   1878		}
   1879	}
   1880
   1881	return 0;
   1882}
   1883
   1884static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
   1885					    SMU7_Discrete_DpmTable *table)
   1886{
   1887	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1888
   1889	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
   1890		table->SVI2Enable = 1;
   1891	else
   1892		table->SVI2Enable = 0;
   1893	return 0;
   1894}
   1895
   1896static int ci_start_smc(struct pp_hwmgr *hwmgr)
   1897{
   1898	/* set smc instruct start point at 0x0 */
   1899	ci_program_jump_on_start(hwmgr);
   1900
   1901	/* enable smc clock */
   1902	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
   1903
   1904	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
   1905
   1906	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
   1907				 INTERRUPTS_ENABLED, 1);
   1908
   1909	return 0;
   1910}
   1911
   1912static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
   1913{
   1914	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1915	uint16_t config;
   1916
   1917	config = VR_SVI2_PLANE_1;
   1918	table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
   1919
   1920	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
   1921		config = VR_SVI2_PLANE_2;
   1922		table->VRConfig |= config;
   1923	} else {
    1924		pr_info("VDDC should be on the SVI2 controller!");
   1925	}
   1926
   1927	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
   1928		config = VR_SVI2_PLANE_2;
   1929		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
   1930	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
   1931		config = VR_SMIO_PATTERN_1;
   1932		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
   1933	}
   1934
   1935	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
   1936		config = VR_SMIO_PATTERN_2;
   1937		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
   1938	}
   1939
   1940	return 0;
   1941}
   1942
   1943static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
   1944{
   1945	int result;
   1946	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1947	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   1948	SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
   1949	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
   1950	u32 i;
   1951
   1952	ci_initialize_power_tune_defaults(hwmgr);
   1953	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
   1954
   1955	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
   1956		ci_populate_smc_voltage_tables(hwmgr, table);
   1957
   1958	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1959			PHM_PlatformCaps_AutomaticDCTransition))
   1960		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
   1961
   1962
   1963	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1964			PHM_PlatformCaps_StepVddc))
   1965		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
   1966
   1967	if (data->is_memory_gddr5)
   1968		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
   1969
   1970	if (data->ulv_supported) {
   1971		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
   1972		PP_ASSERT_WITH_CODE(0 == result,
   1973			"Failed to initialize ULV state!", return result);
   1974
   1975		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   1976			ixCG_ULV_PARAMETER, 0x40035);
   1977	}
   1978
   1979	result = ci_populate_all_graphic_levels(hwmgr);
   1980	PP_ASSERT_WITH_CODE(0 == result,
   1981		"Failed to initialize Graphics Level!", return result);
   1982
   1983	result = ci_populate_all_memory_levels(hwmgr);
   1984	PP_ASSERT_WITH_CODE(0 == result,
   1985		"Failed to initialize Memory Level!", return result);
   1986
   1987	result = ci_populate_smc_link_level(hwmgr, table);
   1988	PP_ASSERT_WITH_CODE(0 == result,
   1989		"Failed to initialize Link Level!", return result);
   1990
   1991	result = ci_populate_smc_acpi_level(hwmgr, table);
   1992	PP_ASSERT_WITH_CODE(0 == result,
   1993		"Failed to initialize ACPI Level!", return result);
   1994
   1995	result = ci_populate_smc_vce_level(hwmgr, table);
   1996	PP_ASSERT_WITH_CODE(0 == result,
   1997		"Failed to initialize VCE Level!", return result);
   1998
   1999	result = ci_populate_smc_acp_level(hwmgr, table);
   2000	PP_ASSERT_WITH_CODE(0 == result,
   2001		"Failed to initialize ACP Level!", return result);
   2002
    2003	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state), */
    2004	/* we only need to populate the ARB settings for the initial state. */
   2005	result = ci_program_memory_timing_parameters(hwmgr);
   2006	PP_ASSERT_WITH_CODE(0 == result,
   2007		"Failed to Write ARB settings for the initial state.", return result);
   2008
   2009	result = ci_populate_smc_uvd_level(hwmgr, table);
   2010	PP_ASSERT_WITH_CODE(0 == result,
   2011		"Failed to initialize UVD Level!", return result);
   2012
   2013	table->UvdBootLevel  = 0;
   2014	table->VceBootLevel  = 0;
   2015	table->AcpBootLevel  = 0;
   2016	table->SamuBootLevel  = 0;
   2017
   2018	table->GraphicsBootLevel = 0;
   2019	table->MemoryBootLevel = 0;
   2020
   2021	result = ci_populate_smc_boot_level(hwmgr, table);
   2022	PP_ASSERT_WITH_CODE(0 == result,
   2023		"Failed to initialize Boot Level!", return result);
   2024
   2025	result = ci_populate_smc_initial_state(hwmgr);
   2026	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
   2027
   2028	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
   2029	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
   2030
   2031	table->UVDInterval = 1;
   2032	table->VCEInterval = 1;
   2033	table->ACPInterval = 1;
   2034	table->SAMUInterval = 1;
   2035	table->GraphicsVoltageChangeEnable  = 1;
   2036	table->GraphicsThermThrottleEnable  = 1;
   2037	table->GraphicsInterval = 1;
   2038	table->VoltageInterval  = 1;
   2039	table->ThermalInterval  = 1;
   2040
   2041	table->TemperatureLimitHigh =
   2042		(data->thermal_temp_setting.temperature_high *
   2043		 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2044	table->TemperatureLimitLow =
   2045		(data->thermal_temp_setting.temperature_low *
   2046		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
   2047
   2048	table->MemoryVoltageChangeEnable  = 1;
   2049	table->MemoryInterval  = 1;
   2050	table->VoltageResponseTime  = 0;
   2051	table->VddcVddciDelta = 4000;
   2052	table->PhaseResponseTime  = 0;
   2053	table->MemoryThermThrottleEnable  = 1;
   2054
   2055	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
   2056			"There must be 1 or more PCIE levels defined in PPTable.",
   2057			return -EINVAL);
   2058
   2059	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
   2060	table->PCIeGenInterval = 1;
   2061
   2062	result = ci_populate_vr_config(hwmgr, table);
   2063	PP_ASSERT_WITH_CODE(0 == result,
   2064			"Failed to populate VRConfig setting!", return result);
   2065	data->vr_config = table->VRConfig;
   2066
   2067	ci_populate_smc_svi2_config(hwmgr, table);
   2068
   2069	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
   2070		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
   2071
   2072	table->ThermGpio  = 17;
   2073	table->SclkStepSize = 0x4000;
   2074	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
   2075		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
   2076		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
   2077				PHM_PlatformCaps_RegulatorHot);
   2078	} else {
   2079		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
   2080		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   2081				PHM_PlatformCaps_RegulatorHot);
   2082	}
   2083
   2084	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
   2085
   2086	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
   2087	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
   2088	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
   2089	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
   2090	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
   2091	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
   2092	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
   2093	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
   2094	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
   2095	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
   2096	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
   2097	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
   2098
   2099	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
   2100	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
   2101	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
   2102
    2103	/* Upload all DPM data (levels, level counts, etc.) to SMC memory; the trailing PID controllers are not overwritten. */
   2104	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
   2105					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
   2106					(uint8_t *)&(table->SystemFlags),
    2107					sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
   2108					SMC_RAM_END);
   2109
   2110	PP_ASSERT_WITH_CODE(0 == result,
   2111		"Failed to upload dpm data to SMC memory!", return result;);
   2112
   2113	result = ci_populate_initial_mc_reg_table(hwmgr);
   2114	PP_ASSERT_WITH_CODE((0 == result),
    2115		"Failed to populate initial MC Reg table!", return result);
   2116
   2117	result = ci_populate_pm_fuses(hwmgr);
   2118	PP_ASSERT_WITH_CODE(0 == result,
    2119			"Failed to populate PM fuses to SMC memory!", return result);
   2120
   2121	ci_start_smc(hwmgr);
   2122
   2123	return 0;
   2124}
   2125
   2126static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
   2127{
   2128	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   2129	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
   2130	uint32_t duty100;
   2131	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
   2132	uint16_t fdo_min, slope1, slope2;
   2133	uint32_t reference_clock;
   2134	int res;
   2135	uint64_t tmp64;
   2136
   2137	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
   2138		return 0;
   2139
   2140	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
   2141		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   2142			PHM_PlatformCaps_MicrocodeFanControl);
   2143		return 0;
   2144	}
   2145
   2146	if (0 == ci_data->fan_table_start) {
   2147		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
   2148		return 0;
   2149	}
   2150
   2151	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
   2152
   2153	if (0 == duty100) {
   2154		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
   2155		return 0;
   2156	}
   2157
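        	/* usPWMMin appears to be in 0.01% units (hence the /10000):
        	 * scale it onto the controller's 0..duty100 FDO range.
        	 */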
   2158	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
   2159	do_div(tmp64, 10000);
   2160	fdo_min = (uint16_t)tmp64;
   2161
   2162	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
   2163	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
   2164
   2165	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
   2166	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
   2167
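        	/* Piecewise-linear fan curve: temperatures are kept in 0.01 degC,
        	 * so the (50 + x) / 100 pattern below is a round-to-nearest
        	 * conversion, and the slopes carry the x16 fixed-point scale the
        	 * fan table evidently expects.
        	 */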
   2168	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
   2169	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
   2170
   2171	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
   2172	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
   2173	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
   2174
   2175	fan_table.Slope1 = cpu_to_be16(slope1);
   2176	fan_table.Slope2 = cpu_to_be16(slope2);
   2177
   2178	fan_table.FdoMin = cpu_to_be16(fdo_min);
   2179
   2180	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
   2181
   2182	fan_table.HystUp = cpu_to_be16(1);
   2183
   2184	fan_table.HystSlope = cpu_to_be16(1);
   2185
   2186	fan_table.TempRespLim = cpu_to_be16(5);
   2187
   2188	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
   2189
   2190	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
   2191
   2192	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
   2193
   2194	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
   2195
   2196	res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
   2197
   2198	return res;
   2199}
   2200
   2201static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
   2202{
   2203	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2204
   2205	if (data->need_update_smu7_dpm_table &
    2206			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
   2207		return ci_program_memory_timing_parameters(hwmgr);
   2208
   2209	return 0;
   2210}
   2211
   2212static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
   2213{
   2214	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2215	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   2216
   2217	int result = 0;
   2218	uint32_t low_sclk_interrupt_threshold = 0;
   2219
   2220	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   2221			PHM_PlatformCaps_SclkThrottleLowNotification)
   2222		&& (data->low_sclk_interrupt_threshold != 0)) {
   2223		low_sclk_interrupt_threshold =
   2224				data->low_sclk_interrupt_threshold;
   2225
   2226		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
   2227
   2228		result = ci_copy_bytes_to_smc(
   2229				hwmgr,
   2230				smu_data->dpm_table_start +
   2231				offsetof(SMU7_Discrete_DpmTable,
   2232					LowSclkInterruptT),
   2233				(uint8_t *)&low_sclk_interrupt_threshold,
   2234				sizeof(uint32_t),
   2235				SMC_RAM_END);
   2236	}
   2237
   2238	result = ci_update_and_upload_mc_reg_table(hwmgr);
   2239
   2240	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
   2241
   2242	result = ci_program_mem_timing_parameters(hwmgr);
   2243	PP_ASSERT_WITH_CODE((result == 0),
   2244			"Failed to program memory timing parameters!",
   2245			);
   2246
   2247	return result;
   2248}
   2249
   2250static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
   2251{
   2252	switch (type) {
   2253	case SMU_SoftRegisters:
   2254		switch (member) {
   2255		case HandshakeDisables:
   2256			return offsetof(SMU7_SoftRegisters, HandshakeDisables);
   2257		case VoltageChangeTimeout:
   2258			return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
   2259		case AverageGraphicsActivity:
   2260			return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
   2261		case AverageMemoryActivity:
   2262			return offsetof(SMU7_SoftRegisters, AverageMemoryA);
   2263		case PreVBlankGap:
   2264			return offsetof(SMU7_SoftRegisters, PreVBlankGap);
   2265		case VBlankTimeout:
   2266			return offsetof(SMU7_SoftRegisters, VBlankTimeout);
   2267		case DRAM_LOG_ADDR_H:
   2268			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
   2269		case DRAM_LOG_ADDR_L:
   2270			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
   2271		case DRAM_LOG_PHY_ADDR_H:
   2272			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
   2273		case DRAM_LOG_PHY_ADDR_L:
   2274			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
   2275		case DRAM_LOG_BUFF_SIZE:
   2276			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
   2277		}
   2278		break;
   2279	case SMU_Discrete_DpmTable:
   2280		switch (member) {
   2281		case LowSclkInterruptThreshold:
   2282			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
   2283		}
   2284		break;
   2285	}
   2286	pr_debug("can't get the offset of type %x member %x\n", type, member);
   2287	return 0;
   2288}
   2289
   2290static uint32_t ci_get_mac_definition(uint32_t value)
   2291{
   2292	switch (value) {
   2293	case SMU_MAX_LEVELS_GRAPHICS:
   2294		return SMU7_MAX_LEVELS_GRAPHICS;
   2295	case SMU_MAX_LEVELS_MEMORY:
   2296		return SMU7_MAX_LEVELS_MEMORY;
   2297	case SMU_MAX_LEVELS_LINK:
   2298		return SMU7_MAX_LEVELS_LINK;
   2299	case SMU_MAX_ENTRIES_SMIO:
   2300		return SMU7_MAX_ENTRIES_SMIO;
   2301	case SMU_MAX_LEVELS_VDDC:
   2302		return SMU7_MAX_LEVELS_VDDC;
   2303	case SMU_MAX_LEVELS_VDDCI:
   2304		return SMU7_MAX_LEVELS_VDDCI;
   2305	case SMU_MAX_LEVELS_MVDD:
   2306		return SMU7_MAX_LEVELS_MVDD;
   2307	}
   2308
    2309	pr_debug("can't get the MAC definition for %x\n", value);
   2310	return 0;
   2311}
   2312
   2313static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
   2314{
   2315	uint32_t byte_count, start_addr;
   2316	uint8_t *src;
   2317	uint32_t data;
   2318
   2319	struct cgs_firmware_info info = {0};
   2320
   2321	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
   2322
   2323	hwmgr->is_kicker = info.is_kicker;
   2324	hwmgr->smu_version = info.version;
   2325	byte_count = info.image_size;
   2326	src = (uint8_t *)info.kptr;
   2327	start_addr = info.ucode_start_address;
   2328
    2329	if (byte_count > SMC_RAM_END) {
    2330		pr_err("SMC firmware is larger than the SMC RAM area.\n");
   2331		return -EINVAL;
   2332	}
   2333
   2334	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
   2335	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
   2336
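        	/* The SMC expects big-endian words: pack each group of four
        	 * firmware bytes MSB-first and stream them through the
        	 * auto-incrementing indirect data register.
        	 */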
   2337	for (; byte_count >= 4; byte_count -= 4) {
   2338		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
   2339		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
   2340		src += 4;
   2341	}
   2342	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
   2343
   2344	if (0 != byte_count) {
    2345		pr_err("SMC firmware size must be divisible by 4\n");
   2346		return -EINVAL;
   2347	}
   2348
   2349	return 0;
   2350}
   2351
   2352static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
   2353{
   2354	if (ci_is_smc_ram_running(hwmgr)) {
   2355		pr_info("smc is running, no need to load smc firmware\n");
   2356		return 0;
   2357	}
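        	/* Hold the SMC in reset with its clock gated while the ucode is
        	 * streamed in; ci_start_smc() releases it again later.
        	 */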
   2358	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
   2359			boot_seq_done, 1);
   2360	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
   2361			pre_fetcher_en, 1);
   2362
   2363	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
   2364	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
   2365	return ci_load_smc_ucode(hwmgr);
   2366}
   2367
   2368static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
   2369{
   2370	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2371	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   2372
   2373	uint32_t tmp = 0;
   2374	int result;
   2375	bool error = false;
   2376
   2377	if (ci_upload_firmware(hwmgr))
   2378		return -EINVAL;
   2379
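        	/* The firmware header holds the SMC SRAM offsets of each table;
        	 * cache them so later uploads know where to write.
        	 */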
   2380	result = ci_read_smc_sram_dword(hwmgr,
   2381				SMU7_FIRMWARE_HEADER_LOCATION +
   2382				offsetof(SMU7_Firmware_Header, DpmTable),
   2383				&tmp, SMC_RAM_END);
   2384
   2385	if (0 == result)
   2386		ci_data->dpm_table_start = tmp;
   2387
   2388	error |= (0 != result);
   2389
   2390	result = ci_read_smc_sram_dword(hwmgr,
   2391				SMU7_FIRMWARE_HEADER_LOCATION +
   2392				offsetof(SMU7_Firmware_Header, SoftRegisters),
   2393				&tmp, SMC_RAM_END);
   2394
   2395	if (0 == result) {
   2396		data->soft_regs_start = tmp;
   2397		ci_data->soft_regs_start = tmp;
   2398	}
   2399
   2400	error |= (0 != result);
   2401
   2402	result = ci_read_smc_sram_dword(hwmgr,
   2403				SMU7_FIRMWARE_HEADER_LOCATION +
   2404				offsetof(SMU7_Firmware_Header, mcRegisterTable),
   2405				&tmp, SMC_RAM_END);
   2406
   2407	if (0 == result)
    2408		ci_data->mc_reg_table_start = tmp;
    2409
        	error |= (0 != result);
        
   2410	result = ci_read_smc_sram_dword(hwmgr,
   2411				SMU7_FIRMWARE_HEADER_LOCATION +
   2412				offsetof(SMU7_Firmware_Header, FanTable),
   2413				&tmp, SMC_RAM_END);
   2414
   2415	if (0 == result)
   2416		ci_data->fan_table_start = tmp;
   2417
   2418	error |= (0 != result);
   2419
   2420	result = ci_read_smc_sram_dword(hwmgr,
   2421				SMU7_FIRMWARE_HEADER_LOCATION +
   2422				offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
   2423				&tmp, SMC_RAM_END);
   2424
   2425	if (0 == result)
   2426		ci_data->arb_table_start = tmp;
   2427
   2428	error |= (0 != result);
   2429
   2430	result = ci_read_smc_sram_dword(hwmgr,
   2431				SMU7_FIRMWARE_HEADER_LOCATION +
   2432				offsetof(SMU7_Firmware_Header, Version),
   2433				&tmp, SMC_RAM_END);
   2434
   2435	if (0 == result)
   2436		hwmgr->microcode_version_info.SMC = tmp;
   2437
   2438	error |= (0 != result);
   2439
   2440	return error ? 1 : 0;
   2441}
   2442
    2443static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
   2444{
   2445	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
   2446}
   2447
   2448static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
   2449{
   2450	bool result = true;
   2451
   2452	switch (in_reg) {
   2453	case  mmMC_SEQ_RAS_TIMING:
   2454		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
   2455		break;
   2456
   2457	case  mmMC_SEQ_DLL_STBY:
   2458		*out_reg = mmMC_SEQ_DLL_STBY_LP;
   2459		break;
   2460
   2461	case  mmMC_SEQ_G5PDX_CMD0:
   2462		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
   2463		break;
   2464
   2465	case  mmMC_SEQ_G5PDX_CMD1:
   2466		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
   2467		break;
   2468
   2469	case  mmMC_SEQ_G5PDX_CTRL:
   2470		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
   2471		break;
   2472
   2473	case mmMC_SEQ_CAS_TIMING:
   2474		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
   2475		break;
   2476
   2477	case mmMC_SEQ_MISC_TIMING:
   2478		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
   2479		break;
   2480
   2481	case mmMC_SEQ_MISC_TIMING2:
   2482		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
   2483		break;
   2484
   2485	case mmMC_SEQ_PMG_DVS_CMD:
   2486		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
   2487		break;
   2488
   2489	case mmMC_SEQ_PMG_DVS_CTL:
   2490		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
   2491		break;
   2492
   2493	case mmMC_SEQ_RD_CTL_D0:
   2494		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
   2495		break;
   2496
   2497	case mmMC_SEQ_RD_CTL_D1:
   2498		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
   2499		break;
   2500
   2501	case mmMC_SEQ_WR_CTL_D0:
   2502		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
   2503		break;
   2504
   2505	case mmMC_SEQ_WR_CTL_D1:
   2506		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
   2507		break;
   2508
   2509	case mmMC_PMG_CMD_EMRS:
   2510		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
   2511		break;
   2512
   2513	case mmMC_PMG_CMD_MRS:
   2514		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
   2515		break;
   2516
   2517	case mmMC_PMG_CMD_MRS1:
   2518		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
   2519		break;
   2520
   2521	case mmMC_SEQ_PMG_TIMING:
   2522		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
   2523		break;
   2524
   2525	case mmMC_PMG_CMD_MRS2:
   2526		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
   2527		break;
   2528
   2529	case mmMC_SEQ_WR_CTL_2:
   2530		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
   2531		break;
   2532
   2533	default:
   2534		result = false;
   2535		break;
   2536	}
   2537
   2538	return result;
   2539}
   2540
   2541static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
   2542{
   2543	uint32_t i;
   2544	uint16_t address;
   2545
   2546	for (i = 0; i < table->last; i++) {
   2547		table->mc_reg_address[i].s0 =
   2548			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
   2549			? address : table->mc_reg_address[i].s1;
   2550	}
   2551	return 0;
   2552}
   2553
   2554static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
   2555					struct ci_mc_reg_table *ni_table)
   2556{
   2557	uint8_t i, j;
   2558
   2559	PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
   2560		"Invalid VramInfo table.", return -EINVAL);
   2561	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
   2562		"Invalid VramInfo table.", return -EINVAL);
   2563
   2564	for (i = 0; i < table->last; i++)
   2565		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
   2566
   2567	ni_table->last = table->last;
   2568
   2569	for (i = 0; i < table->num_entries; i++) {
   2570		ni_table->mc_reg_table_entry[i].mclk_max =
   2571			table->mc_reg_table_entry[i].mclk_max;
   2572		for (j = 0; j < table->last; j++) {
   2573			ni_table->mc_reg_table_entry[i].mc_data[j] =
   2574				table->mc_reg_table_entry[i].mc_data[j];
   2575		}
   2576	}
   2577
   2578	ni_table->num_entries = table->num_entries;
   2579
   2580	return 0;
   2581}
   2582
   2583static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
   2584					struct ci_mc_reg_table *table)
   2585{
   2586	uint8_t i, j, k;
   2587	uint32_t temp_reg;
   2588	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2589
   2590	for (i = 0, j = table->last; i < table->last; i++) {
   2591		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
   2592			"Invalid VramInfo table.", return -EINVAL);
   2593
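        		/* The MRS/EMRS shadow registers are absent from the VBIOS
        		 * table; synthesize their per-entry values by combining the
        		 * live register contents with the MISC1/RESERVE_M data,
        		 * appending them after the copied entries (index j).
        		 */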
   2594		switch (table->mc_reg_address[i].s1) {
   2595
   2596		case mmMC_SEQ_MISC1:
   2597			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
   2598			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
   2599			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
   2600			for (k = 0; k < table->num_entries; k++) {
   2601				table->mc_reg_table_entry[k].mc_data[j] =
   2602					((temp_reg & 0xffff0000)) |
   2603					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
   2604			}
   2605			j++;
   2606
   2607			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
   2608				"Invalid VramInfo table.", return -EINVAL);
   2609			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
   2610			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
   2611			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
   2612			for (k = 0; k < table->num_entries; k++) {
   2613				table->mc_reg_table_entry[k].mc_data[j] =
   2614					(temp_reg & 0xffff0000) |
   2615					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
   2616
   2617				if (!data->is_memory_gddr5)
   2618					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
   2619			}
   2620			j++;
   2621
   2622			if (!data->is_memory_gddr5) {
   2623				PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
   2624					"Invalid VramInfo table.", return -EINVAL);
   2625				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
   2626				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
   2627				for (k = 0; k < table->num_entries; k++) {
   2628					table->mc_reg_table_entry[k].mc_data[j] =
   2629						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
   2630				}
   2631				j++;
   2632			}
   2633
   2634			break;
   2635
   2636		case mmMC_SEQ_RESERVE_M:
   2637			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
   2638			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
   2639			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
   2640			for (k = 0; k < table->num_entries; k++) {
   2641				table->mc_reg_table_entry[k].mc_data[j] =
   2642					(temp_reg & 0xffff0000) |
   2643					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
   2644			}
   2645			j++;
   2646			break;
   2647
   2648		default:
   2649			break;
   2650		}
   2651
   2652	}
   2653
   2654	table->last = j;
   2655
   2656	return 0;
   2657}
   2658
   2659static int ci_set_valid_flag(struct ci_mc_reg_table *table)
   2660{
   2661	uint8_t i, j;
   2662
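        	/* A register needs per-level programming only if its value
        	 * differs between some pair of consecutive AC timing entries.
        	 */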
   2663	for (i = 0; i < table->last; i++) {
   2664		for (j = 1; j < table->num_entries; j++) {
   2665			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
   2666				table->mc_reg_table_entry[j].mc_data[i]) {
   2667				table->validflag |= (1 << i);
   2668				break;
   2669			}
   2670		}
   2671	}
   2672
   2673	return 0;
   2674}
   2675
   2676static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
   2677{
   2678	int result;
   2679	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
   2680	pp_atomctrl_mc_reg_table *table;
   2681	struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
    2682	uint8_t module_index = ci_get_memory_module_index(hwmgr);
   2683
   2684	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
   2685
   2686	if (NULL == table)
   2687		return -ENOMEM;
   2688
   2689	/* Program additional LP registers that are no longer programmed by VBIOS */
   2690	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
   2691	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
   2692	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
   2693	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
   2694	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
   2695	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
   2696	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
   2697	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
   2698	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
   2699	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
   2700	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
   2701	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
   2702	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
   2703	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
   2704	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
   2705	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
   2706	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
   2707	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
   2708	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
   2709	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
   2710
   2711	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
   2712
   2713	if (0 == result)
   2714		result = ci_copy_vbios_smc_reg_table(table, ni_table);
   2715
   2716	if (0 == result) {
   2717		ci_set_s0_mc_reg_index(ni_table);
   2718		result = ci_set_mc_special_registers(hwmgr, ni_table);
   2719	}
   2720
   2721	if (0 == result)
   2722		ci_set_valid_flag(ni_table);
   2723
   2724	kfree(table);
   2725
   2726	return result;
   2727}
   2728
   2729static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
   2730{
   2731	return ci_is_smc_ram_running(hwmgr);
   2732}
   2733
   2734static int ci_smu_init(struct pp_hwmgr *hwmgr)
   2735{
   2736	struct ci_smumgr *ci_priv = NULL;
   2737
   2738	ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
   2739
   2740	if (ci_priv == NULL)
   2741		return -ENOMEM;
   2742
   2743	hwmgr->smu_backend = ci_priv;
   2744
   2745	return 0;
   2746}
   2747
   2748static int ci_smu_fini(struct pp_hwmgr *hwmgr)
   2749{
   2750	kfree(hwmgr->smu_backend);
   2751	hwmgr->smu_backend = NULL;
   2752	return 0;
   2753}
   2754
   2755static int ci_start_smu(struct pp_hwmgr *hwmgr)
   2756{
   2757	return 0;
   2758}
   2759
   2760static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
   2761				void *profile_setting)
   2762{
   2763	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2764	struct ci_smumgr *smu_data = (struct ci_smumgr *)
   2765			(hwmgr->smu_backend);
   2766	struct profile_mode_setting *setting;
   2767	struct SMU7_Discrete_GraphicsLevel *levels =
   2768			smu_data->smc_state_table.GraphicsLevel;
   2769	uint32_t array = smu_data->dpm_table_start +
   2770			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
   2771
   2772	uint32_t mclk_array = smu_data->dpm_table_start +
   2773			offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
   2774	struct SMU7_Discrete_MemoryLevel *mclk_levels =
   2775			smu_data->smc_state_table.MemoryLevel;
   2776	uint32_t i;
   2777	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
   2778
   2779	if (profile_setting == NULL)
   2780		return -EINVAL;
   2781
   2782	setting = (struct profile_mode_setting *)profile_setting;
   2783
   2784	if (setting->bupdate_sclk) {
   2785		if (!data->sclk_dpm_key_disabled)
   2786			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
   2787		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
   2788			if (levels[i].ActivityLevel !=
   2789				cpu_to_be16(setting->sclk_activity)) {
   2790				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
   2791
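        				/* SMC RAM is dword-addressable: align the field offset
        				 * down to 4 bytes and read-modify-write the containing
        				 * word, byte-swapping around the access.
        				 */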
   2792				clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
   2793						+ offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
   2794				offset = clk_activity_offset & ~0x3;
   2795				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
   2796				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
   2797				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
   2798
   2799			}
   2800			if (levels[i].UpH != setting->sclk_up_hyst ||
   2801				levels[i].DownH != setting->sclk_down_hyst) {
   2802				levels[i].UpH = setting->sclk_up_hyst;
   2803				levels[i].DownH = setting->sclk_down_hyst;
   2804				up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
   2805						+ offsetof(SMU7_Discrete_GraphicsLevel, UpH);
   2806				down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
   2807						+ offsetof(SMU7_Discrete_GraphicsLevel, DownH);
   2808				offset = up_hyst_offset & ~0x3;
   2809				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
   2810				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
   2811				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
   2812				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
   2813			}
   2814		}
   2815		if (!data->sclk_dpm_key_disabled)
   2816			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
   2817	}
   2818
   2819	if (setting->bupdate_mclk) {
   2820		if (!data->mclk_dpm_key_disabled)
   2821			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
   2822		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
   2823			if (mclk_levels[i].ActivityLevel !=
   2824				cpu_to_be16(setting->mclk_activity)) {
   2825				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
   2826
   2827				clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
   2828						+ offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
   2829				offset = clk_activity_offset & ~0x3;
   2830				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
   2831				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
   2832				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
   2833
   2834			}
   2835			if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
   2836				mclk_levels[i].DownH != setting->mclk_down_hyst) {
   2837				mclk_levels[i].UpH = setting->mclk_up_hyst;
   2838				mclk_levels[i].DownH = setting->mclk_down_hyst;
   2839				up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
   2840						+ offsetof(SMU7_Discrete_MemoryLevel, UpH);
   2841				down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
   2842						+ offsetof(SMU7_Discrete_MemoryLevel, DownH);
   2843				offset = up_hyst_offset & ~0x3;
   2844				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
   2845				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
   2846				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
   2847				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
   2848			}
   2849		}
   2850		if (!data->mclk_dpm_key_disabled)
   2851			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
   2852	}
   2853	return 0;
   2854}
   2855
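/*
 * Choose the UVD boot level and rebuild the mask of UVD DPM levels the SMC
 * may use, capped by the VDDC limit for the current (AC or DC) power source.
 */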
   2856static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
   2857{
   2858	struct amdgpu_device *adev = hwmgr->adev;
   2859	struct smu7_hwmgr *data = hwmgr->backend;
   2860	struct ci_smumgr *smu_data = hwmgr->smu_backend;
   2861	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
   2862			hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
   2863	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
   2864					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
   2865					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
   2866					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
   2867	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
   2868						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
   2869	int32_t i;
   2870
   2871	if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
   2872		smu_data->smc_state_table.UvdBootLevel = 0;
   2873	else
   2874		smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
   2875
   2876	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
   2877				UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
   2878
   2879	data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
   2880
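	/*
	 * Walk the dependency table from the top down, enabling each level
	 * whose voltage requirement fits under max_vddc.  In the forced
	 * profile modes, or when UVD DPM is not supported, the loop breaks
	 * after the first iteration so only the highest level is enabled.
	 */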
   2881	for (i = uvd_table->count - 1; i >= 0; i--) {
   2882		if (uvd_table->entries[i].v <= max_vddc)
   2883			data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
   2884		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
   2885			break;
   2886	}
   2887	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
   2888				data->dpm_level_enable_mask.uvd_dpm_enable_mask,
   2889				NULL);
   2890
   2891	return 0;
   2892}
   2893
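/*
 * VCE counterpart of the UVD update above; the boot level is pinned to 0
 * (see the inline comment below) while the enable mask is derived from the
 * VCE clock/voltage dependency table in the same way.
 */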
   2894static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
   2895{
   2896	struct amdgpu_device *adev = hwmgr->adev;
   2897	struct smu7_hwmgr *data = hwmgr->backend;
   2898	struct phm_vce_clock_voltage_dependency_table *vce_table =
   2899			hwmgr->dyn_state.vce_clock_voltage_dependency_table;
   2900	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
   2901					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
   2902					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
   2903					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
   2904	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
   2905						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
   2906	int32_t i;
   2907
   2908	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
    2909				VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set the minimum evclk */
   2910
   2911	data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
   2912
   2913	for (i = vce_table->count - 1; i >= 0; i--) {
   2914		if (vce_table->entries[i].v <= max_vddc)
   2915			data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
   2916		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
   2917			break;
   2918	}
   2919	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
   2920				data->dpm_level_enable_mask.vce_dpm_enable_mask,
   2921				NULL);
   2922
   2923	return 0;
   2924}
   2925
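/*
 * Table-update dispatcher called from the common smu7 code; only the UVD
 * and VCE tables need refreshing on CI, other table types are silently
 * ignored.
 */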
   2926static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
   2927{
   2928	switch (type) {
   2929	case SMU_UVD_TABLE:
   2930		ci_update_uvd_smc_table(hwmgr);
   2931		break;
   2932	case SMU_VCE_TABLE:
   2933		ci_update_vce_smc_table(hwmgr);
   2934		break;
   2935	default:
   2936		break;
   2937	}
   2938	return 0;
   2939}
   2940
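/* Hold the SMC in reset by asserting rst_reg in SMC_SYSCON_RESET_CNTL. */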
   2941static void ci_reset_smc(struct pp_hwmgr *hwmgr)
   2942{
   2943	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   2944				  SMC_SYSCON_RESET_CNTL,
   2945				  rst_reg, 1);
   2946}
    2947
   2949static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
   2950{
   2951	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   2952				  SMC_SYSCON_CLOCK_CNTL_0,
   2953				  ck_disable, 1);
   2954}
   2955
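/*
 * Stopping the SMC is a two-step sequence: assert reset first, then gate
 * the SMC clock, so the core is quiesced before its clock goes away.
 */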
   2956static int ci_stop_smc(struct pp_hwmgr *hwmgr)
   2957{
   2958	ci_reset_smc(hwmgr);
   2959	ci_stop_smc_clock(hwmgr);
   2960
   2961	return 0;
   2962}
   2963
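/*
 * Entry points the generic smumgr layer dispatches through for CI (Sea
 * Islands, e.g. Bonaire/Hawaii) parts: a smum_send_msg_to_smc() call from
 * the hwmgr, for instance, lands in ci_send_msg_to_smc().  NULL hooks are
 * steps this SMU version does not implement.
 */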
   2964const struct pp_smumgr_func ci_smu_funcs = {
   2965	.name = "ci_smu",
   2966	.smu_init = ci_smu_init,
   2967	.smu_fini = ci_smu_fini,
   2968	.start_smu = ci_start_smu,
   2969	.check_fw_load_finish = NULL,
   2970	.request_smu_load_fw = NULL,
   2971	.request_smu_load_specific_fw = NULL,
   2972	.send_msg_to_smc = ci_send_msg_to_smc,
   2973	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
   2974	.get_argument = smu7_get_argument,
   2975	.download_pptable_settings = NULL,
   2976	.upload_pptable_settings = NULL,
   2977	.get_offsetof = ci_get_offsetof,
   2978	.process_firmware_header = ci_process_firmware_header,
   2979	.init_smc_table = ci_init_smc_table,
   2980	.update_sclk_threshold = ci_update_sclk_threshold,
   2981	.thermal_setup_fan_table = ci_thermal_setup_fan_table,
   2982	.populate_all_graphic_levels = ci_populate_all_graphic_levels,
   2983	.populate_all_memory_levels = ci_populate_all_memory_levels,
   2984	.get_mac_definition = ci_get_mac_definition,
   2985	.initialize_mc_reg_table = ci_initialize_mc_reg_table,
   2986	.is_dpm_running = ci_is_dpm_running,
   2987	.update_dpm_settings = ci_update_dpm_settings,
   2988	.update_smc_table = ci_update_smc_table,
   2989	.stop_smc = ci_stop_smc,
   2990};