cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smu7_hwmgr.c (191176B)


      1/*
      2 * Copyright 2015 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23#include "pp_debug.h"
     24#include <linux/delay.h>
     25#include <linux/fb.h>
     26#include <linux/module.h>
     27#include <linux/pci.h>
     28#include <linux/slab.h>
     29#include <asm/div64.h>
     30#if IS_ENABLED(CONFIG_X86_64)
     31#include <asm/intel-family.h>
     32#endif
     33#include <drm/amdgpu_drm.h>
     34#include "ppatomctrl.h"
     35#include "atombios.h"
     36#include "pptable_v1_0.h"
     37#include "pppcielanes.h"
     38#include "amd_pcie_helpers.h"
     39#include "hardwaremanager.h"
     40#include "process_pptables_v1_0.h"
     41#include "cgs_common.h"
     42
     43#include "smu7_common.h"
     44
     45#include "hwmgr.h"
     46#include "smu7_hwmgr.h"
     47#include "smu_ucode_xfer_vi.h"
     48#include "smu7_powertune.h"
     49#include "smu7_dyn_defaults.h"
     50#include "smu7_thermal.h"
     51#include "smu7_clockpowergating.h"
     52#include "processpptables.h"
     53#include "pp_thermal.h"
     54#include "smu7_baco.h"
     55#include "smu7_smumgr.h"
     56#include "polaris10_smumgr.h"
     57
     58#include "ivsrcid/ivsrcid_vislands30.h"
     59
     60#define MC_CG_ARB_FREQ_F0           0x0a
     61#define MC_CG_ARB_FREQ_F1           0x0b
     62#define MC_CG_ARB_FREQ_F2           0x0c
     63#define MC_CG_ARB_FREQ_F3           0x0d
     64
     65#define MC_CG_SEQ_DRAMCONF_S0       0x05
     66#define MC_CG_SEQ_DRAMCONF_S1       0x06
     67#define MC_CG_SEQ_YCLK_SUSPEND      0x04
     68#define MC_CG_SEQ_YCLK_RESUME       0x0a
     69
     70#define SMC_CG_IND_START            0xc0030000
     71#define SMC_CG_IND_END              0xc0040000
     72
     73#define MEM_FREQ_LOW_LATENCY        25000
     74#define MEM_FREQ_HIGH_LATENCY       80000
     75
     76#define MEM_LATENCY_HIGH            45
     77#define MEM_LATENCY_LOW             35
     78#define MEM_LATENCY_ERR             0xFFFF
     79
     80#define MC_SEQ_MISC0_GDDR5_SHIFT 28
     81#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
     82#define MC_SEQ_MISC0_GDDR5_VALUE 5
     83
     84#define PCIE_BUS_CLK                10000
     85#define TCLK                        (PCIE_BUS_CLK / 10)
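        /* A worked example of the define above: with PCIE_BUS_CLK = 10000
         * (assumed to be in 10 kHz units, i.e. 100 MHz), TCLK comes out to
         * 10000 / 10 = 1000, i.e. 10 MHz. */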
     86
     87static struct profile_mode_setting smu7_profiling[7] =
     88					{{0, 0, 0, 0, 0, 0, 0, 0},
     89					 {1, 0, 100, 30, 1, 0, 100, 10},
     90					 {1, 10, 0, 30, 0, 0, 0, 0},
     91					 {0, 0, 0, 0, 1, 10, 16, 31},
     92					 {1, 0, 11, 50, 1, 0, 100, 10},
     93					 {1, 0, 5, 30, 0, 0, 0, 0},
     94					 {0, 0, 0, 0, 0, 0, 0, 0},
     95					};
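        /* Sketch of the row layout above, assuming the field order of
         * struct profile_mode_setting in smu7_hwmgr.h:
         *   { bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
         *     bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity }
         * with one row per PP_SMC_POWER_PROFILE_* mode. */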
     96
     97#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
     98
     99#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
    100#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
    101#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
    102#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
    103#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
    104
    105#define STRAP_EVV_REVISION_MSB		2211
    106#define STRAP_EVV_REVISION_LSB		2208
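        /* Bits 2208..2211 inclusive, i.e. a 4-bit EVV revision strap field. */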
    107
    108/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
    109enum DPM_EVENT_SRC {
    110	DPM_EVENT_SRC_ANALOG = 0,
    111	DPM_EVENT_SRC_EXTERNAL = 1,
    112	DPM_EVENT_SRC_DIGITAL = 2,
    113	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
    114	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
    115};
    116
    117#define ixDIDT_SQ_EDC_CTRL                         0x0013
    118#define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
    119#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
    120#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
    121#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
    122#define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018
    123
    124#define ixDIDT_TD_EDC_CTRL                         0x0053
    125#define ixDIDT_TD_EDC_THRESHOLD                    0x0054
    126#define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
    127#define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
    128#define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
    129#define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058
    130
    131#define ixDIDT_TCP_EDC_CTRL                        0x0073
    132#define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
    133#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
    134#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
    135#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
    136#define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078
    137
    138#define ixDIDT_DB_EDC_CTRL                         0x0033
    139#define ixDIDT_DB_EDC_THRESHOLD                    0x0034
    140#define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
    141#define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
    142#define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
    143#define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038
    144
     145uint32_t DIDTEDCConfig_P12[] = {
     146	ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
     147	ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
     148	ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
     149	ixDIDT_SQ_EDC_STALL_PATTERN_7,
     150	ixDIDT_SQ_EDC_THRESHOLD,
     151	ixDIDT_SQ_EDC_CTRL,
     152	ixDIDT_TD_EDC_STALL_PATTERN_1_2,
     153	ixDIDT_TD_EDC_STALL_PATTERN_3_4,
     154	ixDIDT_TD_EDC_STALL_PATTERN_5_6,
     155	ixDIDT_TD_EDC_STALL_PATTERN_7,
     156	ixDIDT_TD_EDC_THRESHOLD,
     157	ixDIDT_TD_EDC_CTRL,
     158	ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
     159	ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
     160	ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
     161	ixDIDT_TCP_EDC_STALL_PATTERN_7,
     162	ixDIDT_TCP_EDC_THRESHOLD,
     163	ixDIDT_TCP_EDC_CTRL,
     164	ixDIDT_DB_EDC_STALL_PATTERN_1_2,
     165	ixDIDT_DB_EDC_STALL_PATTERN_3_4,
     166	ixDIDT_DB_EDC_STALL_PATTERN_5_6,
     167	ixDIDT_DB_EDC_STALL_PATTERN_7,
     168	ixDIDT_DB_EDC_THRESHOLD,
     169	ixDIDT_DB_EDC_CTRL,
     170	0xFFFFFFFF /* end of list */
     171};
    172
    173static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
    174static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
    175		enum pp_clock_type type, uint32_t mask);
    176static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);
    177
    178static struct smu7_power_state *cast_phw_smu7_power_state(
    179				  struct pp_hw_power_state *hw_ps)
    180{
    181	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
    182				"Invalid Powerstate Type!",
    183				 return NULL);
    184
    185	return (struct smu7_power_state *)hw_ps;
    186}
    187
    188static const struct smu7_power_state *cast_const_phw_smu7_power_state(
    189				 const struct pp_hw_power_state *hw_ps)
    190{
    191	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
    192				"Invalid Powerstate Type!",
    193				 return NULL);
    194
    195	return (const struct smu7_power_state *)hw_ps;
    196}
    197
    198/**
    199 * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
    200 *
    201 * @hwmgr:  the address of the powerplay hardware manager.
    202 * Return:   always 0
    203 */
    204static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
    205{
    206	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
    207
    208	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
    209
    210	return 0;
    211}
    212
    213static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
    214{
    215	uint32_t speedCntl = 0;
    216
     217	/* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
    218	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
    219			ixPCIE_LC_SPEED_CNTL);
    220	return((uint16_t)PHM_GET_FIELD(speedCntl,
    221			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
    222}
    223
    224static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
    225{
    226	uint32_t link_width;
    227
     228	/* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
    229	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
    230			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
    231
    232	PP_ASSERT_WITH_CODE((7 >= link_width),
    233			"Invalid PCIe lane width!", return 0);
    234
    235	return decode_pcie_lane_width(link_width);
    236}
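        /* decode_pcie_lane_width() (pppcielanes.c) expands the 3-bit
         * LC_LINK_WIDTH_RD encoding into an actual lane count (roughly
         * 0->x0, 1->x1, 2->x2, 3->x4, 4->x8, 5->x12, 6->x16; stated here
         * as an assumption, since the mapping lives outside this file). */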
    237
    238/**
    239 * smu7_enable_smc_voltage_controller - Enable voltage control
    240 *
    241 * @hwmgr:  the address of the powerplay hardware manager.
    242 * Return:   always PP_Result_OK
    243 */
    244static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
    245{
    246	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
    247	    hwmgr->chip_id <= CHIP_VEGAM) {
    248		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
    249				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
    250		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
    251				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
    252	}
    253
    254	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
    255		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
    256
    257	return 0;
    258}
    259
    260/**
    261 * smu7_voltage_control - Checks if we want to support voltage control
    262 *
    263 * @hwmgr:  the address of the powerplay hardware manager.
    264 */
    265static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
    266{
    267	const struct smu7_hwmgr *data =
    268			(const struct smu7_hwmgr *)(hwmgr->backend);
    269
    270	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
    271}
    272
    273/**
    274 * smu7_enable_voltage_control - Enable voltage control
    275 *
    276 * @hwmgr:  the address of the powerplay hardware manager.
    277 * Return:   always 0
    278 */
    279static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
    280{
    281	/* enable voltage control */
    282	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    283			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
    284
    285	return 0;
    286}
    287
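        /* Builds a flat voltage table (no SMIO mask, no phase delay) by
         * copying each voltage value out of a v0 clock/voltage dependency
         * table. */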
    288static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
    289		struct phm_clock_voltage_dependency_table *voltage_dependency_table
    290		)
    291{
    292	uint32_t i;
    293
    294	PP_ASSERT_WITH_CODE((NULL != voltage_table),
     295			"Voltage table is NULL.", return -EINVAL;);
    296
    297	voltage_table->mask_low = 0;
    298	voltage_table->phase_delay = 0;
    299	voltage_table->count = voltage_dependency_table->count;
    300
    301	for (i = 0; i < voltage_dependency_table->count; i++) {
    302		voltage_table->entries[i].value =
    303			voltage_dependency_table->entries[i].v;
    304		voltage_table->entries[i].smio_low = 0;
    305	}
    306
    307	return 0;
    308}
    309
    310
    311/**
    312 * smu7_construct_voltage_tables - Create Voltage Tables.
    313 *
    314 * @hwmgr:  the address of the powerplay hardware manager.
    315 * Return:   always 0
    316 */
    317static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
    318{
    319	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    320	struct phm_ppt_v1_information *table_info =
    321			(struct phm_ppt_v1_information *)hwmgr->pptable;
    322	int result = 0;
    323	uint32_t tmp;
    324
    325	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
    326		result = atomctrl_get_voltage_table_v3(hwmgr,
    327				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
    328				&(data->mvdd_voltage_table));
    329		PP_ASSERT_WITH_CODE((0 == result),
    330				"Failed to retrieve MVDD table.",
    331				return result);
    332	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
    333		if (hwmgr->pp_table_version == PP_TABLE_V1)
    334			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
    335					table_info->vdd_dep_on_mclk);
    336		else if (hwmgr->pp_table_version == PP_TABLE_V0)
    337			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
    338					hwmgr->dyn_state.mvdd_dependency_on_mclk);
    339
    340		PP_ASSERT_WITH_CODE((0 == result),
    341				"Failed to retrieve SVI2 MVDD table from dependency table.",
    342				return result;);
    343	}
    344
    345	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
    346		result = atomctrl_get_voltage_table_v3(hwmgr,
    347				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
    348				&(data->vddci_voltage_table));
    349		PP_ASSERT_WITH_CODE((0 == result),
    350				"Failed to retrieve VDDCI table.",
    351				return result);
    352	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
    353		if (hwmgr->pp_table_version == PP_TABLE_V1)
    354			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
    355					table_info->vdd_dep_on_mclk);
    356		else if (hwmgr->pp_table_version == PP_TABLE_V0)
    357			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
    358					hwmgr->dyn_state.vddci_dependency_on_mclk);
    359		PP_ASSERT_WITH_CODE((0 == result),
    360				"Failed to retrieve SVI2 VDDCI table from dependency table.",
    361				return result);
    362	}
    363
    364	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
    365		/* VDDGFX has only SVI2 voltage control */
    366		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
    367					table_info->vddgfx_lookup_table);
    368		PP_ASSERT_WITH_CODE((0 == result),
    369			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
    370	}
    371
    372
    373	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
    374		result = atomctrl_get_voltage_table_v3(hwmgr,
    375					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
    376					&data->vddc_voltage_table);
    377		PP_ASSERT_WITH_CODE((0 == result),
    378			"Failed to retrieve VDDC table.", return result;);
    379	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
    380
    381		if (hwmgr->pp_table_version == PP_TABLE_V0)
    382			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
    383					hwmgr->dyn_state.vddc_dependency_on_mclk);
    384		else if (hwmgr->pp_table_version == PP_TABLE_V1)
    385			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
    386				table_info->vddc_lookup_table);
    387
    388		PP_ASSERT_WITH_CODE((0 == result),
    389			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
    390	}
    391
    392	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
    393	PP_ASSERT_WITH_CODE(
    394			(data->vddc_voltage_table.count <= tmp),
    395		"Too many voltage values for VDDC. Trimming to fit state table.",
    396			phm_trim_voltage_table_to_fit_state_table(tmp,
    397						&(data->vddc_voltage_table)));
    398
    399	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
    400	PP_ASSERT_WITH_CODE(
    401			(data->vddgfx_voltage_table.count <= tmp),
     402		"Too many voltage values for VDDGFX. Trimming to fit state table.",
    403			phm_trim_voltage_table_to_fit_state_table(tmp,
    404						&(data->vddgfx_voltage_table)));
    405
    406	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
    407	PP_ASSERT_WITH_CODE(
    408			(data->vddci_voltage_table.count <= tmp),
    409		"Too many voltage values for VDDCI. Trimming to fit state table.",
    410			phm_trim_voltage_table_to_fit_state_table(tmp,
    411					&(data->vddci_voltage_table)));
    412
    413	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
    414	PP_ASSERT_WITH_CODE(
    415			(data->mvdd_voltage_table.count <= tmp),
    416		"Too many voltage values for MVDD. Trimming to fit state table.",
    417			phm_trim_voltage_table_to_fit_state_table(tmp,
    418						&(data->mvdd_voltage_table)));
    419
    420	return 0;
    421}
    422
    423/**
     424 * smu7_program_static_screen_threshold_parameters - Programs static screen detection parameters
    425 *
    426 * @hwmgr:  the address of the powerplay hardware manager.
    427 * Return:   always 0
    428 */
    429static int smu7_program_static_screen_threshold_parameters(
    430							struct pp_hwmgr *hwmgr)
    431{
    432	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    433
    434	/* Set static screen threshold unit */
    435	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    436			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
    437			data->static_screen_threshold_unit);
    438	/* Set static screen threshold */
    439	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    440			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
    441			data->static_screen_threshold);
    442
    443	return 0;
    444}
    445
    446/**
     447 * smu7_enable_display_gap - Set up the display gap for glitch-free memory clock switching.
    448 *
    449 * @hwmgr:  the address of the powerplay hardware manager.
     450 * Return:   always 0
    451 */
    452static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
    453{
    454	uint32_t display_gap =
    455			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
    456					ixCG_DISPLAY_GAP_CNTL);
    457
    458	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
    459			DISP_GAP, DISPLAY_GAP_IGNORE);
    460
    461	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
    462			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
    463
    464	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
    465			ixCG_DISPLAY_GAP_CNTL, display_gap);
    466
    467	return 0;
    468}
    469
    470/**
    471 * smu7_program_voting_clients - Programs activity state transition voting clients
    472 *
    473 * @hwmgr:  the address of the powerplay hardware manager.
     474 * Return:   always 0
    475 */
    476static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
    477{
    478	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    479	int i;
    480
    481	/* Clear reset for voting clients before enabling DPM */
    482	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    483			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
    484	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    485			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
    486
    487	for (i = 0; i < 8; i++)
    488		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
    489					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
    490					data->voting_rights_clients[i]);
    491	return 0;
    492}
    493
    494static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
    495{
    496	int i;
    497
    498	/* Reset voting clients before disabling DPM */
    499	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    500			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
    501	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    502			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
    503
    504	for (i = 0; i < 8; i++)
    505		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
    506				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
    507
    508	return 0;
    509}
    510
    511/* Copy one arb setting to another and then switch the active set.
     512 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
    513 */
    514static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
    515		uint32_t arb_src, uint32_t arb_dest)
    516{
    517	uint32_t mc_arb_dram_timing;
    518	uint32_t mc_arb_dram_timing2;
    519	uint32_t burst_time;
    520	uint32_t mc_cg_config;
    521
    522	switch (arb_src) {
    523	case MC_CG_ARB_FREQ_F0:
    524		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
    525		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
    526		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
    527		break;
    528	case MC_CG_ARB_FREQ_F1:
    529		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
    530		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
    531		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
    532		break;
    533	default:
    534		return -EINVAL;
    535	}
    536
    537	switch (arb_dest) {
    538	case MC_CG_ARB_FREQ_F0:
    539		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
    540		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
    541		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
    542		break;
    543	case MC_CG_ARB_FREQ_F1:
    544		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
    545		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
    546		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
    547		break;
    548	default:
    549		return -EINVAL;
    550	}
    551
    552	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
    553	mc_cg_config |= 0x0000000F;
    554	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
    555	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
    556
    557	return 0;
    558}
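        /* The MC_CG_CONFIG write above sets the low four request bits, and the
         * CG_ARB_REQ field write then makes arb_dest the active arb set. */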
    559
    560static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
    561{
    562	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
    563}
    564
    565/**
    566 * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
    567 *
    568 * @hwmgr:  the address of the powerplay hardware manager.
    569 * Return:   always 0
    570 * This function is to be called from the SetPowerState table.
    571 */
    572static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
    573{
    574	return smu7_copy_and_switch_arb_sets(hwmgr,
    575			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
    576}
    577
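        /* Reads the currently active arb set from byte 1 of SMC scratch
         * register 9; if it is not already F0, the settings are copied into
         * F0 and the hardware is switched back to it. */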
    578static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
    579{
    580	uint32_t tmp;
    581
    582	tmp = (cgs_read_ind_register(hwmgr->device,
    583			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
    584			0x0000ff00) >> 8;
    585
    586	if (tmp == MC_CG_ARB_FREQ_F0)
    587		return 0;
    588
    589	return smu7_copy_and_switch_arb_sets(hwmgr,
    590			tmp, MC_CG_ARB_FREQ_F0);
    591}
    592
    593static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
    594{
    595	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
    596	uint16_t pcie_gen = 0;
    597
    598	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
    599	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
    600		pcie_gen = 3;
    601	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
    602		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
    603		pcie_gen = 2;
    604	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
    605		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
    606		pcie_gen = 1;
    607	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
    608		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
    609		pcie_gen = 0;
    610
    611	return pcie_gen;
    612}
    613
    614static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
    615{
    616	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
    617	uint16_t pcie_width = 0;
    618
    619	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
    620		pcie_width = 16;
    621	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
    622		pcie_width = 12;
    623	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
    624		pcie_width = 8;
    625	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
    626		pcie_width = 4;
    627	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
    628		pcie_width = 2;
    629	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
    630		pcie_width = 1;
    631
    632	return pcie_width;
    633}
    634
    635static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
    636{
    637	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    638
    639	struct phm_ppt_v1_information *table_info =
    640			(struct phm_ppt_v1_information *)(hwmgr->pptable);
    641	struct phm_ppt_v1_pcie_table *pcie_table = NULL;
    642
    643	uint32_t i, max_entry;
    644	uint32_t tmp;
    645
    646	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
    647			data->use_pcie_power_saving_levels), "No pcie performance levels!",
    648			return -EINVAL);
    649
    650	if (table_info != NULL)
    651		pcie_table = table_info->pcie_table;
    652
    653	if (data->use_pcie_performance_levels &&
    654			!data->use_pcie_power_saving_levels) {
    655		data->pcie_gen_power_saving = data->pcie_gen_performance;
    656		data->pcie_lane_power_saving = data->pcie_lane_performance;
    657	} else if (!data->use_pcie_performance_levels &&
    658			data->use_pcie_power_saving_levels) {
    659		data->pcie_gen_performance = data->pcie_gen_power_saving;
    660		data->pcie_lane_performance = data->pcie_lane_power_saving;
    661	}
    662	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
    663	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
    664					tmp,
    665					MAX_REGULAR_DPM_NUMBER);
    666
    667	if (pcie_table != NULL) {
    668		/* max_entry is used to make sure we reserve one PCIE level
    669		 * for boot level (fix for A+A PSPP issue).
     670		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
     671		 * then ignore the last entry. */
    672		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
    673		for (i = 1; i < max_entry; i++) {
    674			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
    675					get_pcie_gen_support(data->pcie_gen_cap,
    676							pcie_table->entries[i].gen_speed),
    677					get_pcie_lane_support(data->pcie_lane_cap,
    678							pcie_table->entries[i].lane_width));
    679		}
    680		data->dpm_table.pcie_speed_table.count = max_entry - 1;
    681		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
    682	} else {
    683		/* Hardcode Pcie Table */
    684		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
    685				get_pcie_gen_support(data->pcie_gen_cap,
    686						PP_Min_PCIEGen),
    687				get_pcie_lane_support(data->pcie_lane_cap,
    688						PP_Max_PCIELane));
    689		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
    690				get_pcie_gen_support(data->pcie_gen_cap,
    691						PP_Min_PCIEGen),
    692				get_pcie_lane_support(data->pcie_lane_cap,
    693						PP_Max_PCIELane));
    694		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
    695				get_pcie_gen_support(data->pcie_gen_cap,
    696						PP_Max_PCIEGen),
    697				get_pcie_lane_support(data->pcie_lane_cap,
    698						PP_Max_PCIELane));
    699		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
    700				get_pcie_gen_support(data->pcie_gen_cap,
    701						PP_Max_PCIEGen),
    702				get_pcie_lane_support(data->pcie_lane_cap,
    703						PP_Max_PCIELane));
    704		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
    705				get_pcie_gen_support(data->pcie_gen_cap,
    706						PP_Max_PCIEGen),
    707				get_pcie_lane_support(data->pcie_lane_cap,
    708						PP_Max_PCIELane));
    709		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
    710				get_pcie_gen_support(data->pcie_gen_cap,
    711						PP_Max_PCIEGen),
    712				get_pcie_lane_support(data->pcie_lane_cap,
    713						PP_Max_PCIELane));
    714
    715		data->dpm_table.pcie_speed_table.count = 6;
    716	}
    717	/* Populate last level for boot PCIE level, but do not increment count. */
    718	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
    719		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
    720			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
    721				get_pcie_gen_support(data->pcie_gen_cap,
    722						PP_Max_PCIEGen),
    723				data->vbios_boot_state.pcie_lane_bootup_value);
    724	} else {
    725		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
    726			data->dpm_table.pcie_speed_table.count,
    727			get_pcie_gen_support(data->pcie_gen_cap,
    728					PP_Min_PCIEGen),
    729			get_pcie_lane_support(data->pcie_lane_cap,
    730					PP_Max_PCIELane));
    731
    732		if (data->pcie_dpm_key_disabled)
    733			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
    734				data->dpm_table.pcie_speed_table.count,
    735				smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
    736	}
    737	return 0;
    738}
    739
    740static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
    741{
    742	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    743
    744	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
    745
    746	phm_reset_single_dpm_table(
    747			&data->dpm_table.sclk_table,
    748				smum_get_mac_definition(hwmgr,
    749					SMU_MAX_LEVELS_GRAPHICS),
    750					MAX_REGULAR_DPM_NUMBER);
    751	phm_reset_single_dpm_table(
    752			&data->dpm_table.mclk_table,
    753			smum_get_mac_definition(hwmgr,
    754				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
    755
    756	phm_reset_single_dpm_table(
    757			&data->dpm_table.vddc_table,
    758				smum_get_mac_definition(hwmgr,
    759					SMU_MAX_LEVELS_VDDC),
    760					MAX_REGULAR_DPM_NUMBER);
    761	phm_reset_single_dpm_table(
    762			&data->dpm_table.vddci_table,
    763			smum_get_mac_definition(hwmgr,
    764				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
    765
    766	phm_reset_single_dpm_table(
    767			&data->dpm_table.mvdd_table,
    768				smum_get_mac_definition(hwmgr,
    769					SMU_MAX_LEVELS_MVDD),
    770					MAX_REGULAR_DPM_NUMBER);
    771	return 0;
    772}
    773/*
     774 * This function initializes all DPM state tables
     775 * for SMU7 based on the dependency table.
     776 * The dynamic state patching function will then trim these
     777 * state tables to the allowed range based
     778 * on the power policy or external client requests,
     779 * such as UVD requests, etc.
    780 */
    781
    782static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
    783{
    784	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    785	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
    786		hwmgr->dyn_state.vddc_dependency_on_sclk;
    787	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
    788		hwmgr->dyn_state.vddc_dependency_on_mclk;
    789	struct phm_cac_leakage_table *std_voltage_table =
    790		hwmgr->dyn_state.cac_leakage_table;
    791	uint32_t i;
    792
    793	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
    794		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
    795	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
     796		"SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
    797
    798	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
    799		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
    800	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
     801		"MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
    802
    803
     804	/* Initialize Sclk DPM table based on allowed Sclk values */
    805	data->dpm_table.sclk_table.count = 0;
    806
    807	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
    808		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
    809				allowed_vdd_sclk_table->entries[i].clk) {
    810			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
    811				allowed_vdd_sclk_table->entries[i].clk;
    812			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
    813			data->dpm_table.sclk_table.count++;
    814		}
    815	}
    816
    817	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
    818		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
     819	/* Initialize Mclk DPM table based on allowed Mclk values */
    820	data->dpm_table.mclk_table.count = 0;
    821	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
    822		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
    823			allowed_vdd_mclk_table->entries[i].clk) {
    824			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
    825				allowed_vdd_mclk_table->entries[i].clk;
    826			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
    827			data->dpm_table.mclk_table.count++;
    828		}
    829	}
    830
     831	/* Initialize Vddc DPM table based on allowed Vddc values and populate corresponding std values. */
    832	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
    833		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
    834		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
    835		/* param1 is for corresponding std voltage */
    836		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
    837	}
    838
    839	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
    840	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
    841
    842	if (NULL != allowed_vdd_mclk_table) {
     843		/* Initialize Vddci DPM table based on allowed Mclk values */
    844		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
    845			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
    846			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
    847		}
    848		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
    849	}
    850
    851	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
    852
    853	if (NULL != allowed_vdd_mclk_table) {
    854		/*
     855		 * Initialize MVDD DPM table based on allowed Mclk
    856		 * values
    857		 */
    858		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
    859			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
    860			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
    861		}
    862		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
    863	}
    864
    865	return 0;
    866}
    867
    868static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
    869{
    870	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    871	struct phm_ppt_v1_information *table_info =
    872			(struct phm_ppt_v1_information *)(hwmgr->pptable);
    873	uint32_t i;
    874
    875	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
    876	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
    877
    878	if (table_info == NULL)
    879		return -EINVAL;
    880
    881	dep_sclk_table = table_info->vdd_dep_on_sclk;
    882	dep_mclk_table = table_info->vdd_dep_on_mclk;
    883
    884	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
    885			"SCLK dependency table is missing.",
    886			return -EINVAL);
    887	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
    888			"SCLK dependency table count is 0.",
    889			return -EINVAL);
    890
    891	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
    892			"MCLK dependency table is missing.",
    893			return -EINVAL);
    894	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
    895			"MCLK dependency table count is 0",
    896			return -EINVAL);
    897
     898	/* Initialize Sclk DPM table based on allowed Sclk values */
    899	data->dpm_table.sclk_table.count = 0;
    900	for (i = 0; i < dep_sclk_table->count; i++) {
    901		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
    902						dep_sclk_table->entries[i].clk) {
    903
    904			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
    905					dep_sclk_table->entries[i].clk;
    906
    907			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
     908					(i == 0);
    909			data->dpm_table.sclk_table.count++;
    910		}
    911	}
    912	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
    913		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
     914	/* Initialize Mclk DPM table based on allowed Mclk values */
    915	data->dpm_table.mclk_table.count = 0;
    916	for (i = 0; i < dep_mclk_table->count; i++) {
    917		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
    918				[data->dpm_table.mclk_table.count - 1].value !=
    919						dep_mclk_table->entries[i].clk) {
    920			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
    921							dep_mclk_table->entries[i].clk;
    922			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
     923							(i == 0);
    924			data->dpm_table.mclk_table.count++;
    925		}
    926	}
    927
    928	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
    929		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
    930	return 0;
    931}
    932
    933static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
    934{
    935	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    936	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
    937	struct phm_ppt_v1_information *table_info =
    938			(struct phm_ppt_v1_information *)(hwmgr->pptable);
    939	uint32_t i;
    940
    941	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
    942	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
    943	struct phm_odn_performance_level *entries;
    944
    945	if (table_info == NULL)
    946		return -EINVAL;
    947
    948	dep_sclk_table = table_info->vdd_dep_on_sclk;
    949	dep_mclk_table = table_info->vdd_dep_on_mclk;
    950
    951	odn_table->odn_core_clock_dpm_levels.num_of_pl =
    952						data->golden_dpm_table.sclk_table.count;
    953	entries = odn_table->odn_core_clock_dpm_levels.entries;
     954	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
    955		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
    956		entries[i].enabled = true;
    957		entries[i].vddc = dep_sclk_table->entries[i].vddc;
    958	}
    959
    960	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
    961		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
    962
    963	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
    964						data->golden_dpm_table.mclk_table.count;
    965	entries = odn_table->odn_memory_clock_dpm_levels.entries;
     966	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
    967		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
    968		entries[i].enabled = true;
    969		entries[i].vddc = dep_mclk_table->entries[i].vddc;
    970	}
    971
    972	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
    973		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
    974
    975	return 0;
    976}
    977
    978static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
    979{
    980	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    981	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
    982	struct phm_ppt_v1_information *table_info =
    983			(struct phm_ppt_v1_information *)(hwmgr->pptable);
    984	uint32_t min_vddc = 0;
    985	uint32_t max_vddc = 0;
    986
    987	if (!table_info)
    988		return;
    989
    990	dep_sclk_table = table_info->vdd_dep_on_sclk;
    991
    992	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
    993
    994	if (min_vddc == 0 || min_vddc > 2000
    995		|| min_vddc > dep_sclk_table->entries[0].vddc)
    996		min_vddc = dep_sclk_table->entries[0].vddc;
    997
    998	if (max_vddc == 0 || max_vddc > 2000
    999		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
   1000		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
   1001
   1002	data->odn_dpm_table.min_vddc = min_vddc;
   1003	data->odn_dpm_table.max_vddc = max_vddc;
   1004}
   1005
   1006static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
   1007{
   1008	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1009	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
   1010	struct phm_ppt_v1_information *table_info =
   1011			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   1012	uint32_t i;
   1013
   1014	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
   1015	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
   1016
   1017	if (table_info == NULL)
   1018		return;
   1019
   1020	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
   1021		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
   1022					data->dpm_table.sclk_table.dpm_levels[i].value) {
   1023			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
   1024			break;
   1025		}
   1026	}
   1027
   1028	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
   1029		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
   1030					data->dpm_table.mclk_table.dpm_levels[i].value) {
   1031			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
   1032			break;
   1033		}
   1034	}
   1035
   1036	dep_table = table_info->vdd_dep_on_mclk;
   1037	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
   1038
   1039	for (i = 0; i < dep_table->count; i++) {
   1040		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
   1041			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
   1042			return;
   1043		}
   1044	}
   1045
   1046	dep_table = table_info->vdd_dep_on_sclk;
   1047	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
   1048	for (i = 0; i < dep_table->count; i++) {
   1049		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
   1050			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
   1051			return;
   1052		}
   1053	}
   1054	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
   1055		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
   1056		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
   1057	}
   1058}
   1059
   1060static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
   1061{
   1062	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1063
   1064	smu7_reset_dpm_tables(hwmgr);
   1065
   1066	if (hwmgr->pp_table_version == PP_TABLE_V1)
   1067		smu7_setup_dpm_tables_v1(hwmgr);
   1068	else if (hwmgr->pp_table_version == PP_TABLE_V0)
   1069		smu7_setup_dpm_tables_v0(hwmgr);
   1070
   1071	smu7_setup_default_pcie_table(hwmgr);
   1072
   1073	/* save a copy of the default DPM table */
   1074	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
   1075			sizeof(struct smu7_dpm_table));
   1076
   1077	/* initialize ODN table */
   1078	if (hwmgr->od_enabled) {
   1079		if (data->odn_dpm_table.max_vddc) {
   1080			smu7_check_dpm_table_updated(hwmgr);
   1081		} else {
   1082			smu7_setup_voltage_range_from_vbios(hwmgr);
   1083			smu7_odn_initial_default_setting(hwmgr);
   1084		}
   1085	}
   1086	return 0;
   1087}
   1088
   1089static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
   1090{
   1091
   1092	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1093			PHM_PlatformCaps_RegulatorHot))
   1094		return smum_send_msg_to_smc(hwmgr,
   1095				PPSMC_MSG_EnableVRHotGPIOInterrupt,
   1096				NULL);
   1097
   1098	return 0;
   1099}
   1100
   1101static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
   1102{
   1103	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   1104			SCLK_PWRMGT_OFF, 0);
   1105	return 0;
   1106}
   1107
   1108static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
   1109{
   1110	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1111
   1112	if (data->ulv_supported)
   1113		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
   1114
   1115	return 0;
   1116}
   1117
   1118static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
   1119{
   1120	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1121
   1122	if (data->ulv_supported)
   1123		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
   1124
   1125	return 0;
   1126}
   1127
   1128static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
   1129{
   1130	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1131			PHM_PlatformCaps_SclkDeepSleep)) {
   1132		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
   1133			PP_ASSERT_WITH_CODE(false,
   1134					"Attempt to enable Master Deep Sleep switch failed!",
   1135					return -EINVAL);
   1136	} else {
   1137		if (smum_send_msg_to_smc(hwmgr,
   1138				PPSMC_MSG_MASTER_DeepSleep_OFF,
   1139				NULL)) {
   1140			PP_ASSERT_WITH_CODE(false,
   1141					"Attempt to disable Master Deep Sleep switch failed!",
   1142					return -EINVAL);
   1143		}
   1144	}
   1145
   1146	return 0;
   1147}
   1148
   1149static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
   1150{
   1151	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1152			PHM_PlatformCaps_SclkDeepSleep)) {
   1153		if (smum_send_msg_to_smc(hwmgr,
   1154				PPSMC_MSG_MASTER_DeepSleep_OFF,
   1155				NULL)) {
   1156			PP_ASSERT_WITH_CODE(false,
   1157					"Attempt to disable Master Deep Sleep switch failed!",
   1158					return -EINVAL);
   1159		}
   1160	}
   1161
   1162	return 0;
   1163}
   1164
   1165static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
   1166{
   1167	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1168	uint32_t soft_register_value = 0;
   1169	uint32_t handshake_disables_offset = data->soft_regs_start
   1170				+ smum_get_offsetof(hwmgr,
   1171					SMU_SoftRegisters, HandshakeDisables);
   1172
   1173	soft_register_value = cgs_read_ind_register(hwmgr->device,
   1174				CGS_IND_REG__SMC, handshake_disables_offset);
   1175	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
   1176	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   1177			handshake_disables_offset, soft_register_value);
   1178	return 0;
   1179}
   1180
   1181static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
   1182{
   1183	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1184	uint32_t soft_register_value = 0;
   1185	uint32_t handshake_disables_offset = data->soft_regs_start
   1186				+ smum_get_offsetof(hwmgr,
   1187					SMU_SoftRegisters, HandshakeDisables);
   1188
   1189	soft_register_value = cgs_read_ind_register(hwmgr->device,
   1190				CGS_IND_REG__SMC, handshake_disables_offset);
   1191	soft_register_value |= smum_get_mac_definition(hwmgr,
   1192					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
   1193	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   1194			handshake_disables_offset, soft_register_value);
   1195	return 0;
   1196}
   1197
   1198static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
   1199{
   1200	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1201
   1202	/* enable SCLK dpm */
   1203	if (!data->sclk_dpm_key_disabled) {
   1204		if (hwmgr->chip_id >= CHIP_POLARIS10 &&
   1205		    hwmgr->chip_id <= CHIP_VEGAM)
   1206			smu7_disable_sclk_vce_handshake(hwmgr);
   1207
   1208		PP_ASSERT_WITH_CODE(
   1209		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
   1210		"Failed to enable SCLK DPM during DPM Start Function!",
   1211		return -EINVAL);
   1212	}
   1213
   1214	/* enable MCLK dpm */
   1215	if (0 == data->mclk_dpm_key_disabled) {
   1216		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
   1217			smu7_disable_handshake_uvd(hwmgr);
   1218
   1219		PP_ASSERT_WITH_CODE(
   1220				(0 == smum_send_msg_to_smc(hwmgr,
   1221						PPSMC_MSG_MCLKDPM_Enable,
   1222						NULL)),
   1223				"Failed to enable MCLK DPM during DPM Start Function!",
   1224				return -EINVAL);
   1225
   1226		if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
   1227		    (hwmgr->chip_id == CHIP_POLARIS10) ||
   1228		    (hwmgr->chip_id == CHIP_POLARIS11) ||
   1229		    (hwmgr->chip_id == CHIP_POLARIS12) ||
   1230		    (hwmgr->chip_id == CHIP_TONGA) ||
   1231		    (hwmgr->chip_id == CHIP_TOPAZ))
   1232			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
   1233
   1234
   1235		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
   1236			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
   1237			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
   1238			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
   1239			udelay(10);
   1240			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
   1241			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
   1242			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
   1243		} else {
   1244			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
   1245			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
   1246			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
   1247			udelay(10);
   1248			if (hwmgr->chip_id == CHIP_VEGAM) {
   1249				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
   1250				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
   1251			} else {
   1252				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
   1253				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
   1254			}
   1255			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
   1256		}
   1257	}
   1258
   1259	return 0;
   1260}
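        /* Note: on CI the raw 0xc0400dXX SMC addresses appear to play the same
         * role as the named ixLCAC_MC0/MC1/CPL_CNTL registers in the else
         * branch (inferred from the parallel structure, not from docs). */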
   1261
   1262static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
   1263{
   1264	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1265
   1266	/*enable general power management */
   1267
   1268	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   1269			GLOBAL_PWRMGT_EN, 1);
   1270
   1271	/* enable sclk deep sleep */
   1272
   1273	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   1274			DYNAMIC_PM_EN, 1);
   1275
   1276	/* prepare for PCIE DPM */
   1277
   1278	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   1279			data->soft_regs_start +
   1280			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
   1281						VoltageChangeTimeout), 0x1000);
   1282	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
   1283			SWRST_COMMAND_1, RESETLC, 0x0);
   1284
   1285	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
   1286		cgs_write_register(hwmgr->device, 0x1488,
   1287			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
   1288
   1289	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
    1290		pr_err("Failed to enable Sclk DPM and Mclk DPM!\n");
   1291		return -EINVAL;
   1292	}
   1293
   1294	/* enable PCIE dpm */
   1295	if (0 == data->pcie_dpm_key_disabled) {
   1296		PP_ASSERT_WITH_CODE(
   1297				(0 == smum_send_msg_to_smc(hwmgr,
   1298						PPSMC_MSG_PCIeDPM_Enable,
   1299						NULL)),
   1300				"Failed to enable pcie DPM during DPM Start Function!",
   1301				return -EINVAL);
   1302	} else {
   1303		PP_ASSERT_WITH_CODE(
   1304				(0 == smum_send_msg_to_smc(hwmgr,
   1305						PPSMC_MSG_PCIeDPM_Disable,
   1306						NULL)),
   1307				"Failed to disable pcie DPM during DPM Start Function!",
   1308				return -EINVAL);
   1309	}
   1310
   1311	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1312				PHM_PlatformCaps_Falcon_QuickTransition)) {
   1313		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
   1314				PPSMC_MSG_EnableACDCGPIOInterrupt,
   1315				NULL)),
   1316				"Failed to enable AC DC GPIO Interrupt!",
   1317				);
   1318	}
   1319
   1320	return 0;
   1321}
   1322
   1323static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
   1324{
   1325	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1326
   1327	/* disable SCLK dpm */
   1328	if (!data->sclk_dpm_key_disabled) {
   1329		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   1330				"Trying to disable SCLK DPM when DPM is disabled",
   1331				return 0);
   1332		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
   1333	}
   1334
   1335	/* disable MCLK dpm */
   1336	if (!data->mclk_dpm_key_disabled) {
   1337		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   1338				"Trying to disable MCLK DPM when DPM is disabled",
   1339				return 0);
   1340		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
   1341	}
   1342
   1343	return 0;
   1344}
   1345
   1346static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
   1347{
   1348	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1349
   1350	/* disable general power management */
   1351	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   1352			GLOBAL_PWRMGT_EN, 0);
   1353	/* disable sclk deep sleep */
   1354	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   1355			DYNAMIC_PM_EN, 0);
   1356
   1357	/* disable PCIE dpm */
   1358	if (!data->pcie_dpm_key_disabled) {
   1359		PP_ASSERT_WITH_CODE(
   1360				(smum_send_msg_to_smc(hwmgr,
   1361						PPSMC_MSG_PCIeDPM_Disable,
   1362						NULL) == 0),
   1363				"Failed to disable pcie DPM during DPM Stop Function!",
   1364				return -EINVAL);
   1365	}
   1366
   1367	smu7_disable_sclk_mclk_dpm(hwmgr);
   1368
   1369	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   1370			"Trying to disable voltage DPM when DPM is disabled",
   1371			return 0);
   1372
   1373	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
   1374
   1375	return 0;
   1376}
   1377
   1378static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
   1379{
   1380	bool protection;
   1381	enum DPM_EVENT_SRC src;
   1382
   1383	switch (sources) {
   1384	default:
    1385		pr_err("Unknown throttling event sources.\n");
   1386		fallthrough;
   1387	case 0:
   1388		protection = false;
   1389		/* src is unused */
   1390		break;
   1391	case (1 << PHM_AutoThrottleSource_Thermal):
   1392		protection = true;
   1393		src = DPM_EVENT_SRC_DIGITAL;
   1394		break;
   1395	case (1 << PHM_AutoThrottleSource_External):
   1396		protection = true;
   1397		src = DPM_EVENT_SRC_EXTERNAL;
   1398		break;
   1399	case (1 << PHM_AutoThrottleSource_External) |
   1400			(1 << PHM_AutoThrottleSource_Thermal):
   1401		protection = true;
   1402		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
   1403		break;
   1404	}
   1405	/* Order matters - don't enable thermal protection for the wrong source. */
   1406	if (protection) {
   1407		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
   1408				DPM_EVENT_SRC, src);
   1409		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   1410				THERMAL_PROTECTION_DIS,
   1411				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1412						PHM_PlatformCaps_ThermalController));
   1413	} else
   1414		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   1415				THERMAL_PROTECTION_DIS, 1);
   1416}
   1417
   1418static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
   1419		PHM_AutoThrottleSource source)
   1420{
   1421	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1422
   1423	if (!(data->active_auto_throttle_sources & (1 << source))) {
   1424		data->active_auto_throttle_sources |= 1 << source;
   1425		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
   1426	}
   1427	return 0;
   1428}
   1429
   1430static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
   1431{
   1432	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
   1433}
   1434
   1435static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
   1436		PHM_AutoThrottleSource source)
   1437{
   1438	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1439
   1440	if (data->active_auto_throttle_sources & (1 << source)) {
   1441		data->active_auto_throttle_sources &= ~(1 << source);
   1442		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
   1443	}
   1444	return 0;
   1445}
   1446
   1447static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
   1448{
   1449	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
   1450}
   1451
   1452static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
   1453{
   1454	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1455	data->pcie_performance_request = true;
   1456
   1457	return 0;
   1458}
   1459
   1460static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
   1461					   uint32_t *cac_config_regs,
   1462					   AtomCtrl_EDCLeakgeTable *edc_leakage_table)
   1463{
   1464	uint32_t data, i = 0;
   1465
   1466	while (cac_config_regs[i] != 0xFFFFFFFF) {
   1467		data = edc_leakage_table->DIDT_REG[i];
   1468		cgs_write_ind_register(hwmgr->device,
   1469				       CGS_IND_REG__DIDT,
   1470				       cac_config_regs[i],
   1471				       data);
   1472		i++;
   1473	}
   1474
   1475	return 0;
   1476}
   1477
   1478static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
   1479{
   1480	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1481	int ret = 0;
   1482
   1483	if (!data->disable_edc_leakage_controller &&
   1484	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
   1485	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
   1486		ret = smu7_program_edc_didt_registers(hwmgr,
   1487						      DIDTEDCConfig_P12,
   1488						      &data->edc_leakage_table);
   1489		if (ret)
   1490			return ret;
   1491
   1492		ret = smum_send_msg_to_smc(hwmgr,
   1493					   (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
   1494					   NULL);
   1495	} else {
   1496		ret = smum_send_msg_to_smc(hwmgr,
   1497					   (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
   1498					   NULL);
   1499	}
   1500
   1501	return ret;
   1502}
   1503
   1504static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
   1505{
   1506	int tmp_result = 0;
   1507	int result = 0;
   1508
   1509	if (smu7_voltage_control(hwmgr)) {
   1510		tmp_result = smu7_enable_voltage_control(hwmgr);
   1511		PP_ASSERT_WITH_CODE(tmp_result == 0,
   1512				"Failed to enable voltage control!",
   1513				result = tmp_result);
   1514
   1515		tmp_result = smu7_construct_voltage_tables(hwmgr);
   1516		PP_ASSERT_WITH_CODE((0 == tmp_result),
   1517				"Failed to construct voltage tables!",
   1518				result = tmp_result);
   1519	}
   1520	smum_initialize_mc_reg_table(hwmgr);
   1521
   1522	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1523			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
   1524		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   1525				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
   1526
   1527	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1528			PHM_PlatformCaps_ThermalController))
   1529		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   1530				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
   1531
   1532	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
   1533	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1534			"Failed to program static screen threshold parameters!",
   1535			result = tmp_result);
   1536
   1537	tmp_result = smu7_enable_display_gap(hwmgr);
   1538	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1539			"Failed to enable display gap!", result = tmp_result);
   1540
   1541	tmp_result = smu7_program_voting_clients(hwmgr);
   1542	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1543			"Failed to program voting clients!", result = tmp_result);
   1544
   1545	tmp_result = smum_process_firmware_header(hwmgr);
   1546	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1547			"Failed to process firmware header!", result = tmp_result);
   1548
   1549	if (hwmgr->chip_id != CHIP_VEGAM) {
   1550		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
   1551		PP_ASSERT_WITH_CODE((0 == tmp_result),
   1552				"Failed to initialize switch from ArbF0 to F1!",
   1553				result = tmp_result);
   1554	}
   1555
   1556	result = smu7_setup_default_dpm_tables(hwmgr);
   1557	PP_ASSERT_WITH_CODE(0 == result,
   1558			"Failed to setup default DPM tables!", return result);
   1559
   1560	tmp_result = smum_init_smc_table(hwmgr);
   1561	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1562			"Failed to initialize SMC table!", result = tmp_result);
   1563
   1564	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
   1565	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1566			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
   1567
   1568	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
   1569	    hwmgr->chip_id <= CHIP_VEGAM) {
   1570		tmp_result = smu7_notify_has_display(hwmgr);
   1571		PP_ASSERT_WITH_CODE((0 == tmp_result),
   1572				"Failed to enable display setting!", result = tmp_result);
   1573	} else {
   1574		smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
   1575	}
   1576
   1577	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
   1578	    hwmgr->chip_id <= CHIP_VEGAM) {
   1579		tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
   1580		PP_ASSERT_WITH_CODE((0 == tmp_result),
   1581				"Failed to populate edc leakage registers!", result = tmp_result);
   1582	}
   1583
   1584	tmp_result = smu7_enable_sclk_control(hwmgr);
   1585	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1586			"Failed to enable SCLK control!", result = tmp_result);
   1587
   1588	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
   1589	PP_ASSERT_WITH_CODE((0 == tmp_result),
    1590			"Failed to enable SMC voltage controller!", result = tmp_result);
   1591
   1592	tmp_result = smu7_enable_ulv(hwmgr);
   1593	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1594			"Failed to enable ULV!", result = tmp_result);
   1595
   1596	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
   1597	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1598			"Failed to enable deep sleep master switch!", result = tmp_result);
   1599
   1600	tmp_result = smu7_enable_didt_config(hwmgr);
   1601	PP_ASSERT_WITH_CODE((tmp_result == 0),
    1602			"Failed to enable DIDT config!", result = tmp_result);
   1603
   1604	tmp_result = smu7_start_dpm(hwmgr);
   1605	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1606			"Failed to start DPM!", result = tmp_result);
   1607
   1608	tmp_result = smu7_enable_smc_cac(hwmgr);
   1609	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1610			"Failed to enable SMC CAC!", result = tmp_result);
   1611
   1612	tmp_result = smu7_enable_power_containment(hwmgr);
   1613	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1614			"Failed to enable power containment!", result = tmp_result);
   1615
   1616	tmp_result = smu7_power_control_set_level(hwmgr);
   1617	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1618			"Failed to power control set level!", result = tmp_result);
   1619
   1620	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
   1621	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1622			"Failed to enable thermal auto throttle!", result = tmp_result);
   1623
   1624	tmp_result = smu7_pcie_performance_request(hwmgr);
   1625	PP_ASSERT_WITH_CODE((0 == tmp_result),
   1626			"pcie performance request failed!", result = tmp_result);
   1627
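	/*
	 * Failures above are logged by PP_ASSERT_WITH_CODE() and accumulated
	 * in result, but only a failed smu7_setup_default_dpm_tables() aborts
	 * DPM bring-up; the rest are treated as non-fatal.
	 */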
   1628	return 0;
   1629}
   1630
   1631static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
   1632{
   1633	if (!hwmgr->avfs_supported)
   1634		return 0;
   1635
   1636	if (enable) {
   1637		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
   1638				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
   1639			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
   1640					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
   1641					"Failed to enable AVFS!",
   1642					return -EINVAL);
   1643		}
   1644	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
   1645			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
   1646		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
   1647				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
   1648				"Failed to disable AVFS!",
   1649				return -EINVAL);
   1650	}
   1651
   1652	return 0;
   1653}
   1654
   1655static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
   1656{
   1657	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1658
   1659	if (!hwmgr->avfs_supported)
   1660		return 0;
   1661
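	/*
	 * AVFS is disabled while VDDC is overdriven, restarted (off then on)
	 * after an SCLK-only overdrive change, and otherwise kept enabled.
	 */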
   1662	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
   1663		smu7_avfs_control(hwmgr, false);
   1664	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
   1665		smu7_avfs_control(hwmgr, false);
   1666		smu7_avfs_control(hwmgr, true);
   1667	} else {
   1668		smu7_avfs_control(hwmgr, true);
   1669	}
   1670
   1671	return 0;
   1672}
   1673
   1674static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
   1675{
   1676	int tmp_result, result = 0;
   1677
   1678	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1679			PHM_PlatformCaps_ThermalController))
   1680		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   1681				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
   1682
   1683	tmp_result = smu7_disable_power_containment(hwmgr);
   1684	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1685			"Failed to disable power containment!", result = tmp_result);
   1686
   1687	tmp_result = smu7_disable_smc_cac(hwmgr);
   1688	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1689			"Failed to disable SMC CAC!", result = tmp_result);
   1690
   1691	tmp_result = smu7_disable_didt_config(hwmgr);
   1692	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1693			"Failed to disable DIDT!", result = tmp_result);
   1694
   1695	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   1696			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
   1697	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   1698			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
   1699
   1700	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
   1701	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1702			"Failed to disable thermal auto throttle!", result = tmp_result);
   1703
   1704	tmp_result = smu7_avfs_control(hwmgr, false);
   1705	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1706			"Failed to disable AVFS!", result = tmp_result);
   1707
   1708	tmp_result = smu7_stop_dpm(hwmgr);
   1709	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1710			"Failed to stop DPM!", result = tmp_result);
   1711
   1712	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
   1713	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1714			"Failed to disable deep sleep master switch!", result = tmp_result);
   1715
   1716	tmp_result = smu7_disable_ulv(hwmgr);
   1717	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1718			"Failed to disable ULV!", result = tmp_result);
   1719
   1720	tmp_result = smu7_clear_voting_clients(hwmgr);
   1721	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1722			"Failed to clear voting clients!", result = tmp_result);
   1723
   1724	tmp_result = smu7_reset_to_default(hwmgr);
   1725	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1726			"Failed to reset to default!", result = tmp_result);
   1727
   1728	tmp_result = smum_stop_smc(hwmgr);
   1729	PP_ASSERT_WITH_CODE((tmp_result == 0),
   1730			"Failed to stop smc!", result = tmp_result);
   1731
   1732	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
   1733	PP_ASSERT_WITH_CODE((tmp_result == 0),
    1734			"Failed to force switch to ArbF0!", result = tmp_result);
   1735
   1736	return result;
   1737}
   1738
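/*
 * PCIe DPM is reportedly unstable when the GPU sits in an Intel Rocket
 * Lake host, so smu7_init_dpm_defaults() below force-disables it there
 * via data->pcie_dpm_key_disabled.
 */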
   1739static bool intel_core_rkl_chk(void)
   1740{
   1741#if IS_ENABLED(CONFIG_X86_64)
   1742	struct cpuinfo_x86 *c = &cpu_data(0);
   1743
   1744	return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
   1745#else
   1746	return false;
   1747#endif
   1748}
   1749
   1750static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
   1751{
   1752	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1753	struct phm_ppt_v1_information *table_info =
   1754			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   1755	struct amdgpu_device *adev = hwmgr->adev;
   1756	uint8_t tmp1, tmp2;
   1757	uint16_t tmp3 = 0;
   1758
   1759	data->dll_default_on = false;
   1760	data->mclk_dpm0_activity_target = 0xa;
   1761	data->vddc_vddgfx_delta = 300;
   1762	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
   1763	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
   1764	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
    1765	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
    1766	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
    1767	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
    1768	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
    1769	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
    1770	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
    1771	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
   1772
   1773	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
   1774	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
   1775	data->pcie_dpm_key_disabled =
   1776		intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
   1777	/* need to set voltage control types before EVV patching */
   1778	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
   1779	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
   1780	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
   1781	data->enable_tdc_limit_feature = true;
   1782	data->enable_pkg_pwr_tracking_feature = true;
   1783	data->force_pcie_gen = PP_PCIEGenInvalid;
   1784	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
   1785	data->current_profile_setting.bupdate_sclk = 1;
   1786	data->current_profile_setting.sclk_up_hyst = 0;
   1787	data->current_profile_setting.sclk_down_hyst = 100;
   1788	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
   1789	data->current_profile_setting.bupdate_mclk = 1;
   1790	if (hwmgr->chip_id >= CHIP_POLARIS10) {
   1791		if (adev->gmc.vram_width == 256) {
   1792			data->current_profile_setting.mclk_up_hyst = 10;
   1793			data->current_profile_setting.mclk_down_hyst = 60;
   1794			data->current_profile_setting.mclk_activity = 25;
   1795		} else if (adev->gmc.vram_width == 128) {
   1796			data->current_profile_setting.mclk_up_hyst = 5;
   1797			data->current_profile_setting.mclk_down_hyst = 16;
   1798			data->current_profile_setting.mclk_activity = 20;
   1799		} else if (adev->gmc.vram_width == 64) {
   1800			data->current_profile_setting.mclk_up_hyst = 3;
   1801			data->current_profile_setting.mclk_down_hyst = 16;
   1802			data->current_profile_setting.mclk_activity = 20;
   1803		}
   1804	} else {
   1805		data->current_profile_setting.mclk_up_hyst = 0;
   1806		data->current_profile_setting.mclk_down_hyst = 100;
   1807		data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
   1808	}
   1809	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
   1810	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
   1811	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
   1812
    1813	if (hwmgr->chip_id == CHIP_HAWAII) {
   1814		data->thermal_temp_setting.temperature_low = 94500;
   1815		data->thermal_temp_setting.temperature_high = 95000;
   1816		data->thermal_temp_setting.temperature_shutdown = 104000;
   1817	} else {
   1818		data->thermal_temp_setting.temperature_low = 99500;
   1819		data->thermal_temp_setting.temperature_high = 100000;
   1820		data->thermal_temp_setting.temperature_shutdown = 104000;
   1821	}
   1822
   1823	data->fast_watermark_threshold = 100;
   1824	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1825			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
   1826		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
   1827	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1828			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
   1829		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
   1830
   1831	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1832			PHM_PlatformCaps_ControlVDDGFX)) {
   1833		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1834			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
   1835			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
   1836		}
   1837	}
   1838
   1839	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1840			PHM_PlatformCaps_EnableMVDDControl)) {
   1841		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1842				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
   1843			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
   1844		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1845				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
   1846			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
   1847	}
   1848
   1849	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
   1850		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1851			PHM_PlatformCaps_ControlVDDGFX);
   1852
   1853	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   1854			PHM_PlatformCaps_ControlVDDCI)) {
   1855		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1856				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
   1857			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
   1858		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   1859				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
   1860			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
   1861	}
   1862
   1863	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
   1864		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1865				PHM_PlatformCaps_EnableMVDDControl);
   1866
   1867	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
   1868		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1869				PHM_PlatformCaps_ControlVDDCI);
   1870
   1871	data->vddc_phase_shed_control = 1;
   1872	if ((hwmgr->chip_id == CHIP_POLARIS12) ||
   1873	    ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
   1874	    ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
   1875	    ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
   1876	    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
   1877		if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   1878			atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
   1879							&tmp3);
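			/*
			 * Bits [6:5] of the returned SVI2 value select the
			 * phase-shed setting; the two bits are then swapped,
			 * e.g. a fuse value of 0b01 becomes control value 0b10.
			 */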
   1880			tmp3 = (tmp3 >> 5) & 0x3;
   1881			data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
   1882		}
   1883	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
   1884		data->vddc_phase_shed_control = 1;
   1885	}
   1886
   1887	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
   1888		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
   1889		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
   1890					PHM_PlatformCaps_ClockStretcher);
   1891
   1892	data->pcie_gen_performance.max = PP_PCIEGen1;
   1893	data->pcie_gen_performance.min = PP_PCIEGen3;
   1894	data->pcie_gen_power_saving.max = PP_PCIEGen1;
   1895	data->pcie_gen_power_saving.min = PP_PCIEGen3;
   1896	data->pcie_lane_performance.max = 0;
   1897	data->pcie_lane_performance.min = 16;
   1898	data->pcie_lane_power_saving.max = 0;
   1899	data->pcie_lane_power_saving.min = 16;
    1900
   1902	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
   1903		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
   1904			      PHM_PlatformCaps_UVDPowerGating);
   1905	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
   1906		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
   1907			      PHM_PlatformCaps_VCEPowerGating);
   1908
   1909	data->disable_edc_leakage_controller = true;
   1910	if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
   1911	    ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
   1912	    (adev->asic_type == CHIP_POLARIS12) ||
   1913	    (adev->asic_type == CHIP_VEGAM))
   1914		data->disable_edc_leakage_controller = false;
   1915
   1916	if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
   1917		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1918			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
   1919		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1920			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
   1921	}
   1922
   1923	if ((adev->pdev->device == 0x699F) &&
   1924	    (adev->pdev->revision == 0xCF)) {
   1925		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1926				PHM_PlatformCaps_PowerContainment);
   1927		data->enable_tdc_limit_feature = false;
   1928		data->enable_pkg_pwr_tracking_feature = false;
   1929		data->disable_edc_leakage_controller = true;
   1930		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   1931					PHM_PlatformCaps_ClockStretcher);
   1932	}
   1933}
   1934
   1935static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
   1936{
   1937	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1938	struct amdgpu_device *adev = hwmgr->adev;
   1939	uint32_t asicrev1, evv_revision, max = 0, min = 0;
   1940
   1941	atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
   1942			&evv_revision);
   1943
   1944	atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
   1945
   1946	if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
   1947	    ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
   1948		min = 1200;
   1949		max = 2500;
   1950	} else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
   1951		   ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
   1952		min = 900;
    1953		max = 2100;
   1954	} else if (hwmgr->chip_id == CHIP_POLARIS10) {
   1955		if (adev->pdev->subsystem_vendor == 0x106B) {
   1956			min = 1000;
   1957			max = 2300;
   1958		} else {
   1959			if (evv_revision == 0) {
   1960				min = 1000;
   1961				max = 2300;
   1962			} else if (evv_revision == 1) {
   1963				if (asicrev1 == 326) {
   1964					min = 1200;
   1965					max = 2500;
   1966					/* TODO: PATCH RO in VBIOS */
   1967				} else {
   1968					min = 1200;
   1969					max = 2000;
   1970				}
   1971			} else if (evv_revision == 2) {
   1972				min = 1200;
   1973				max = 2500;
   1974			}
   1975		}
   1976	} else {
   1977		min = 1100;
   1978		max = 2100;
   1979	}
   1980
   1981	data->ro_range_minimum = min;
   1982	data->ro_range_maximum = max;
   1983
   1984	/* TODO: PATCH RO in VBIOS here */
   1985
   1986	return 0;
   1987}
   1988
   1989/**
   1990 * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID.
   1991 *
   1992 * @hwmgr:  the address of the powerplay hardware manager.
   1993 * Return:   always 0
   1994 */
   1995static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
   1996{
   1997	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   1998	uint16_t vv_id;
   1999	uint16_t vddc = 0;
   2000	uint16_t vddgfx = 0;
   2001	uint16_t i, j;
   2002	uint32_t sclk = 0;
   2003	struct phm_ppt_v1_information *table_info =
   2004			(struct phm_ppt_v1_information *)hwmgr->pptable;
   2005	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
   2006
   2007	if (hwmgr->chip_id == CHIP_POLARIS10 ||
   2008	    hwmgr->chip_id == CHIP_POLARIS11 ||
   2009	    hwmgr->chip_id == CHIP_POLARIS12)
   2010		smu7_calculate_ro_range(hwmgr);
   2011
   2012	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
   2013		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
   2014
   2015		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   2016			if ((hwmgr->pp_table_version == PP_TABLE_V1)
   2017			    && !phm_get_sclk_for_voltage_evv(hwmgr,
   2018						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
   2019				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   2020							PHM_PlatformCaps_ClockStretcher)) {
   2021					sclk_table = table_info->vdd_dep_on_sclk;
   2022
   2023					for (j = 1; j < sclk_table->count; j++) {
   2024						if (sclk_table->entries[j].clk == sclk &&
   2025								sclk_table->entries[j].cks_enable == 0) {
   2026							sclk += 5000;
   2027							break;
   2028						}
   2029					}
   2030				}
   2031				if (0 == atomctrl_get_voltage_evv_on_sclk
   2032				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
   2033				     vv_id, &vddgfx)) {
    2034					/* make sure vddgfx is less than 2 V, or else it could burn the ASIC */
   2035					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
   2036
   2037					/* the voltage should not be zero nor equal to leakage ID */
   2038					if (vddgfx != 0 && vddgfx != vv_id) {
   2039						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
   2040						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
   2041						data->vddcgfx_leakage.count++;
   2042					}
   2043				} else {
   2044					pr_info("Error retrieving EVV voltage value!\n");
   2045				}
   2046			}
   2047		} else {
   2048			if ((hwmgr->pp_table_version == PP_TABLE_V0)
   2049				|| !phm_get_sclk_for_voltage_evv(hwmgr,
   2050					table_info->vddc_lookup_table, vv_id, &sclk)) {
   2051				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   2052						PHM_PlatformCaps_ClockStretcher)) {
   2053					if (table_info == NULL)
   2054						return -EINVAL;
   2055					sclk_table = table_info->vdd_dep_on_sclk;
   2056
   2057					for (j = 1; j < sclk_table->count; j++) {
   2058						if (sclk_table->entries[j].clk == sclk &&
   2059								sclk_table->entries[j].cks_enable == 0) {
   2060							sclk += 5000;
   2061							break;
   2062						}
   2063					}
   2064				}
   2065
   2066				if (phm_get_voltage_evv_on_sclk(hwmgr,
   2067							VOLTAGE_TYPE_VDDC,
   2068							sclk, vv_id, &vddc) == 0) {
   2069					if (vddc >= 2000 || vddc == 0)
   2070						return -EINVAL;
   2071				} else {
    2072					pr_debug("failed to retrieve EVV voltage!\n");
   2073					continue;
   2074				}
   2075
   2076				/* the voltage should not be zero nor equal to leakage ID */
   2077				if (vddc != 0 && vddc != vv_id) {
   2078					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
   2079					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
   2080					data->vddc_leakage.count++;
   2081				}
   2082			}
   2083		}
   2084	}
   2085
   2086	return 0;
   2087}
   2088
   2089/**
   2090 * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
   2091 *
   2092 * @hwmgr:  the address of the powerplay hardware manager.
   2093 * @voltage: pointer to changing voltage
   2094 * @leakage_table: pointer to leakage table
   2095 */
   2096static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
   2097		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
   2098{
   2099	uint32_t index;
   2100
   2101	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
   2102	for (index = 0; index < leakage_table->count; index++) {
    2103		/* if this voltage matches a leakage voltage ID, patch
    2104		 * it with the actual leakage voltage */
   2105		if (leakage_table->leakage_id[index] == *voltage) {
   2106			*voltage = leakage_table->actual_voltage[index];
   2107			break;
   2108		}
   2109	}
   2110
   2111	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
    2112		pr_info("Voltage value looks like a leakage ID but was not patched\n");
   2113}
   2114
   2115/**
   2116 * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
   2117 *
   2118 * @hwmgr:  the address of the powerplay hardware manager.
   2119 * @lookup_table: pointer to voltage lookup table
   2120 * @leakage_table: pointer to leakage table
   2121 * Return:     always 0
   2122 */
   2123static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
   2124		phm_ppt_v1_voltage_lookup_table *lookup_table,
   2125		struct smu7_leakage_voltage *leakage_table)
   2126{
   2127	uint32_t i;
   2128
   2129	for (i = 0; i < lookup_table->count; i++)
   2130		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
   2131				&lookup_table->entries[i].us_vdd, leakage_table);
   2132
   2133	return 0;
   2134}
   2135
   2136static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
   2137		struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
   2138		uint16_t *vddc)
   2139{
   2140	struct phm_ppt_v1_information *table_info =
   2141			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   2142	smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
   2143	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
   2144			table_info->max_clock_voltage_on_dc.vddc;
   2145	return 0;
   2146}
   2147
   2148static int smu7_patch_voltage_dependency_tables_with_lookup_table(
   2149		struct pp_hwmgr *hwmgr)
   2150{
   2151	uint8_t entry_id;
   2152	uint8_t voltage_id;
   2153	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2154	struct phm_ppt_v1_information *table_info =
   2155			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   2156
   2157	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
   2158			table_info->vdd_dep_on_sclk;
   2159	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
   2160			table_info->vdd_dep_on_mclk;
   2161	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
   2162			table_info->mm_dep_table;
   2163
   2164	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   2165		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
   2166			voltage_id = sclk_table->entries[entry_id].vddInd;
   2167			sclk_table->entries[entry_id].vddgfx =
   2168				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
   2169		}
   2170	} else {
   2171		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
   2172			voltage_id = sclk_table->entries[entry_id].vddInd;
   2173			sclk_table->entries[entry_id].vddc =
   2174				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
   2175		}
   2176	}
   2177
   2178	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
   2179		voltage_id = mclk_table->entries[entry_id].vddInd;
   2180		mclk_table->entries[entry_id].vddc =
   2181			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
   2182	}
   2183
   2184	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
   2185		voltage_id = mm_table->entries[entry_id].vddcInd;
   2186		mm_table->entries[entry_id].vddc =
   2187			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
   2188	}
   2189
    2190	return 0;
    2192	}
   2193
   2194static int phm_add_voltage(struct pp_hwmgr *hwmgr,
   2195			phm_ppt_v1_voltage_lookup_table *look_up_table,
   2196			phm_ppt_v1_voltage_lookup_record *record)
   2197{
   2198	uint32_t i;
   2199
   2200	PP_ASSERT_WITH_CODE((NULL != look_up_table),
   2201		"Lookup Table empty.", return -EINVAL);
   2202	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
   2203		"Lookup Table empty.", return -EINVAL);
   2204
   2205	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
   2206	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
   2207		"Lookup Table is full.", return -EINVAL);
   2208
   2209	/* This is to avoid entering duplicate calculated records. */
   2210	for (i = 0; i < look_up_table->count; i++) {
   2211		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
   2212			if (look_up_table->entries[i].us_calculated == 1)
   2213				return 0;
   2214			break;
   2215		}
   2216	}
   2217
   2218	look_up_table->entries[i].us_calculated = 1;
   2219	look_up_table->entries[i].us_vdd = record->us_vdd;
   2220	look_up_table->entries[i].us_cac_low = record->us_cac_low;
   2221	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
   2222	look_up_table->entries[i].us_cac_high = record->us_cac_high;
    2223	/* Only increment the count when appending, not when replacing a duplicate entry. */
   2224	if (i == look_up_table->count)
   2225		look_up_table->count++;
   2226
   2227	return 0;
   2228}
    2229
   2231static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
   2232{
   2233	uint8_t entry_id;
   2234	struct phm_ppt_v1_voltage_lookup_record v_record;
   2235	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2236	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
   2237
   2238	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
   2239	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
   2240
   2241	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   2242		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
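			/*
			 * A vdd_offset with bit 15 set encodes a negative
			 * offset (effectively ones' complement): e.g. 0xFFE6
			 * yields us_vdd = vddgfx - 25 once 0xFFFF is
			 * subtracted below.
			 */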
   2243			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
   2244				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
   2245					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
   2246			else
   2247				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
   2248					sclk_table->entries[entry_id].vdd_offset;
   2249
   2250			sclk_table->entries[entry_id].vddc =
   2251				v_record.us_cac_low = v_record.us_cac_mid =
   2252				v_record.us_cac_high = v_record.us_vdd;
   2253
   2254			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
   2255		}
   2256
   2257		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
   2258			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
   2259				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
   2260					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
   2261			else
   2262				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
   2263					mclk_table->entries[entry_id].vdd_offset;
   2264
   2265			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
   2266				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
   2267			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
   2268		}
   2269	}
   2270	return 0;
   2271}
   2272
   2273static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
   2274{
   2275	uint8_t entry_id;
   2276	struct phm_ppt_v1_voltage_lookup_record v_record;
   2277	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2278	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
   2279	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
   2280
   2281	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   2282		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
   2283			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
   2284				v_record.us_vdd = mm_table->entries[entry_id].vddc +
   2285					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
   2286			else
   2287				v_record.us_vdd = mm_table->entries[entry_id].vddc +
   2288					mm_table->entries[entry_id].vddgfx_offset;
   2289
   2290			/* Add the calculated VDDGFX to the VDDGFX lookup table */
   2291			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
   2292				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
   2293			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
   2294		}
   2295	}
   2296	return 0;
   2297}
   2298
   2299static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
   2300		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
   2301{
   2302	uint32_t table_size, i, j;
   2303	table_size = lookup_table->count;
   2304
   2305	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
   2306		"Lookup table is empty", return -EINVAL);
   2307
    2308	/* Sort the voltages in ascending order */
   2309	for (i = 0; i < table_size - 1; i++) {
   2310		for (j = i + 1; j > 0; j--) {
   2311			if (lookup_table->entries[j].us_vdd <
   2312					lookup_table->entries[j - 1].us_vdd) {
   2313				swap(lookup_table->entries[j - 1],
   2314				     lookup_table->entries[j]);
   2315			}
   2316		}
   2317	}
   2318
   2319	return 0;
   2320}
   2321
   2322static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
   2323{
   2324	int result = 0;
   2325	int tmp_result;
   2326	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2327	struct phm_ppt_v1_information *table_info =
   2328			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   2329
   2330	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
   2331		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
   2332			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
   2333		if (tmp_result != 0)
   2334			result = tmp_result;
   2335
   2336		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
   2337			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
   2338	} else {
   2339
   2340		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
   2341				table_info->vddc_lookup_table, &(data->vddc_leakage));
   2342		if (tmp_result)
   2343			result = tmp_result;
   2344
   2345		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
   2346				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
   2347		if (tmp_result)
   2348			result = tmp_result;
   2349	}
   2350
   2351	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
   2352	if (tmp_result)
   2353		result = tmp_result;
   2354
   2355	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
   2356	if (tmp_result)
   2357		result = tmp_result;
   2358
   2359	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
   2360	if (tmp_result)
   2361		result = tmp_result;
   2362
   2363	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
   2364	if (tmp_result)
   2365		result = tmp_result;
   2366
   2367	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
   2368	if (tmp_result)
   2369		result = tmp_result;
   2370
   2371	return result;
   2372}
   2373
   2374static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
   2375{
   2376	struct phm_ppt_v1_information *table_info =
   2377			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   2378	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
   2379						table_info->vdd_dep_on_sclk;
   2380	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
   2381						table_info->vddc_lookup_table;
   2382	uint16_t highest_voltage;
   2383	uint32_t i;
   2384
   2385	highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
   2386
   2387	for (i = 0; i < lookup_table->count; i++) {
   2388		if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
   2389		    lookup_table->entries[i].us_vdd > highest_voltage)
   2390			highest_voltage = lookup_table->entries[i].us_vdd;
   2391	}
   2392
   2393	return highest_voltage;
   2394}
   2395
   2396static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
   2397{
   2398	struct phm_ppt_v1_information *table_info =
   2399			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   2400
   2401	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
   2402						table_info->vdd_dep_on_sclk;
   2403	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
   2404						table_info->vdd_dep_on_mclk;
   2405
   2406	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
   2407		"VDD dependency on SCLK table is missing.",
   2408		return -EINVAL);
   2409	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
    2410		"VDD dependency on SCLK table must have at least one entry.",
   2411		return -EINVAL);
   2412
   2413	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
   2414		"VDD dependency on MCLK table is missing",
   2415		return -EINVAL);
   2416	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
    2417		"VDD dependency on MCLK table must have at least one entry.",
   2418		return -EINVAL);
   2419
   2420	table_info->max_clock_voltage_on_ac.sclk =
   2421		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
   2422	table_info->max_clock_voltage_on_ac.mclk =
   2423		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
   2424	if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
   2425		table_info->max_clock_voltage_on_ac.vddc =
   2426			smu7_find_highest_vddc(hwmgr);
   2427	else
   2428		table_info->max_clock_voltage_on_ac.vddc =
   2429			allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
   2430	table_info->max_clock_voltage_on_ac.vddci =
   2431		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
   2432
   2433	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
   2434	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
   2435	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
   2436	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
   2437
   2438	return 0;
   2439}
   2440
   2441static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
   2442{
   2443	struct phm_ppt_v1_information *table_info =
   2444		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
   2445	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
   2446	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
   2447	uint32_t i;
   2448	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
   2449	struct amdgpu_device *adev = hwmgr->adev;
   2450
   2451	if (table_info != NULL) {
   2452		dep_mclk_table = table_info->vdd_dep_on_mclk;
   2453		lookup_table = table_info->vddc_lookup_table;
   2454	} else
   2455		return 0;
   2456
   2457	hw_revision = adev->pdev->revision;
   2458	sub_sys_id = adev->pdev->subsystem_device;
   2459	sub_vendor_id = adev->pdev->subsystem_vendor;
   2460
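	/*
	 * Workaround for specific Polaris10 boards (device 0x67DF, rev 0xC7):
	 * force the clock-stretch amount to 0x3 and, when the top MCLK
	 * state's VDDC is below 1000 mV, repoint it at the first real lookup
	 * entry of at least 1000 mV.
	 */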
   2461	if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
   2462	    ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
   2463	     (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
   2464	     (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
   2465
   2466		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
   2467					      CGS_IND_REG__SMC,
   2468					      PWR_CKS_CNTL,
   2469					      CKS_STRETCH_AMOUNT,
   2470					      0x3);
   2471
   2472		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
   2473			return 0;
   2474
   2475		for (i = 0; i < lookup_table->count; i++) {
   2476			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
   2477				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
   2478				return 0;
   2479			}
   2480		}
   2481	}
   2482	return 0;
   2483}
   2484
   2485static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
   2486{
   2487	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
   2488	uint32_t temp_reg;
   2489	struct phm_ppt_v1_information *table_info =
   2490			(struct phm_ppt_v1_information *)(hwmgr->pptable);
    2491
   2493	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
   2494		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
   2495		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
   2496		case 0:
   2497			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
   2498			break;
   2499		case 1:
   2500			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
   2501			break;
   2502		case 2:
   2503			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
   2504			break;
   2505		case 3:
   2506			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
   2507			break;
   2508		case 4:
   2509			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
   2510			break;
   2511		default:
   2512			break;
   2513		}
   2514		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
   2515	}
   2516
   2517	if (table_info == NULL)
   2518		return 0;
   2519
   2520	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
   2521		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
   2522		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
   2523			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
   2524
   2525		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
   2526			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
   2527
   2528		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
   2529
   2530		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
   2531
   2532		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
   2533			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
   2534
   2535		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
   2536
   2537		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
   2538								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
   2539
   2540		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
   2541		table_info->cac_dtp_table->usOperatingTempStep = 1;
   2542		table_info->cac_dtp_table->usOperatingTempHyst = 1;
   2543
   2544		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
   2545			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
   2546
   2547		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
   2548			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
   2549
   2550		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
   2551			       table_info->cac_dtp_table->usOperatingTempMinLimit;
   2552
   2553		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
   2554			       table_info->cac_dtp_table->usOperatingTempMaxLimit;
   2555
   2556		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
   2557			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
   2558
   2559		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
   2560			       table_info->cac_dtp_table->usOperatingTempStep;
   2561
   2562		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
   2563			       table_info->cac_dtp_table->usTargetOperatingTemp;
   2564		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
   2565			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
   2566					PHM_PlatformCaps_ODFuzzyFanControlSupport);
   2567	}
   2568
   2569	return 0;
   2570}
   2571
   2572/**
   2573 * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
   2574 *
   2575 * @hwmgr:  the address of the powerplay hardware manager.
   2576 * @voltage: pointer to changing voltage
   2577 * @leakage_table: pointer to leakage table
   2578 */
   2579static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
   2580		uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
   2581{
   2582	uint32_t index;
   2583
   2584	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
   2585	for (index = 0; index < leakage_table->count; index++) {
    2586		/* if this voltage matches a leakage voltage ID, patch
    2587		 * it with the actual leakage voltage */
   2588		if (leakage_table->leakage_id[index] == *voltage) {
   2589			*voltage = leakage_table->actual_voltage[index];
   2590			break;
   2591		}
   2592	}
   2593
   2594	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
    2595		pr_info("Voltage value looks like a leakage ID but was not patched\n");
   2596}
    2597
   2599static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
   2600			      struct phm_clock_voltage_dependency_table *tab)
   2601{
   2602	uint16_t i;
   2603	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2604
   2605	if (tab)
   2606		for (i = 0; i < tab->count; i++)
   2607			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2608						&data->vddc_leakage);
   2609
   2610	return 0;
   2611}
   2612
   2613static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
   2614			       struct phm_clock_voltage_dependency_table *tab)
   2615{
   2616	uint16_t i;
   2617	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2618
   2619	if (tab)
   2620		for (i = 0; i < tab->count; i++)
   2621			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2622							&data->vddci_leakage);
   2623
   2624	return 0;
   2625}
   2626
   2627static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
   2628				  struct phm_vce_clock_voltage_dependency_table *tab)
   2629{
   2630	uint16_t i;
   2631	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2632
   2633	if (tab)
   2634		for (i = 0; i < tab->count; i++)
   2635			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2636							&data->vddc_leakage);
   2637
   2638	return 0;
   2639}
    2640
   2642static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
   2643				  struct phm_uvd_clock_voltage_dependency_table *tab)
   2644{
   2645	uint16_t i;
   2646	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2647
   2648	if (tab)
   2649		for (i = 0; i < tab->count; i++)
   2650			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2651							&data->vddc_leakage);
   2652
   2653	return 0;
   2654}
   2655
   2656static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
   2657					 struct phm_phase_shedding_limits_table *tab)
   2658{
   2659	uint16_t i;
   2660	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2661
   2662	if (tab)
   2663		for (i = 0; i < tab->count; i++)
   2664			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
   2665							&data->vddc_leakage);
   2666
   2667	return 0;
   2668}
   2669
   2670static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
   2671				   struct phm_samu_clock_voltage_dependency_table *tab)
   2672{
   2673	uint16_t i;
   2674	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2675
   2676	if (tab)
   2677		for (i = 0; i < tab->count; i++)
   2678			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2679							&data->vddc_leakage);
   2680
   2681	return 0;
   2682}
   2683
   2684static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
   2685				  struct phm_acp_clock_voltage_dependency_table *tab)
   2686{
   2687	uint16_t i;
   2688	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2689
   2690	if (tab)
   2691		for (i = 0; i < tab->count; i++)
   2692			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
   2693					&data->vddc_leakage);
   2694
   2695	return 0;
   2696}
   2697
   2698static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
   2699				  struct phm_clock_and_voltage_limits *tab)
   2700{
   2701	uint32_t vddc, vddci;
   2702	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2703
   2704	if (tab) {
   2705		vddc = tab->vddc;
   2706		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
   2707						   &data->vddc_leakage);
   2708		tab->vddc = vddc;
   2709		vddci = tab->vddci;
   2710		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
   2711						   &data->vddci_leakage);
   2712		tab->vddci = vddci;
   2713	}
   2714
   2715	return 0;
   2716}
   2717
   2718static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
   2719{
   2720	uint32_t i;
   2721	uint32_t vddc;
   2722	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2723
   2724	if (tab) {
   2725		for (i = 0; i < tab->count; i++) {
   2726			vddc = (uint32_t)(tab->entries[i].Vddc);
   2727			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
   2728			tab->entries[i].Vddc = (uint16_t)vddc;
   2729		}
   2730	}
   2731
   2732	return 0;
   2733}
   2734
   2735static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
   2736{
   2737	int tmp;
   2738
   2739	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
   2740	if (tmp)
   2741		return -EINVAL;
   2742
   2743	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
   2744	if (tmp)
   2745		return -EINVAL;
   2746
   2747	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
   2748	if (tmp)
   2749		return -EINVAL;
   2750
   2751	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
   2752	if (tmp)
   2753		return -EINVAL;
   2754
   2755	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
   2756	if (tmp)
   2757		return -EINVAL;
   2758
   2759	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
   2760	if (tmp)
   2761		return -EINVAL;
   2762
   2763	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
   2764	if (tmp)
   2765		return -EINVAL;
   2766
   2767	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
   2768	if (tmp)
   2769		return -EINVAL;
   2770
   2771	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
   2772	if (tmp)
   2773		return -EINVAL;
   2774
   2775	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
   2776	if (tmp)
   2777		return -EINVAL;
   2778
   2779	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
   2780	if (tmp)
   2781		return -EINVAL;
   2782
   2783	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
   2784	if (tmp)
   2785		return -EINVAL;
   2786
   2787	return 0;
   2788}
    2789
   2791static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
   2792{
   2793	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2794
   2795	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
   2796	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
   2797	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
   2798
   2799	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
   2800		"VDDC dependency on SCLK table is missing. This table is mandatory",
   2801		return -EINVAL);
   2802	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
    2803		"VDDC dependency on SCLK table must have at least one entry. This table is mandatory",
   2804		return -EINVAL);
   2805
   2806	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
   2807		"VDDC dependency on MCLK table is missing. This table is mandatory",
   2808		return -EINVAL);
   2809	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
    2810		"VDDC dependency on MCLK table must have at least one entry. This table is mandatory",
   2811		return -EINVAL);
   2812
   2813	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
   2814	data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
   2815
   2816	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
   2817		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
   2818	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
   2819		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
   2820	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
   2821		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
   2822
   2823	if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
   2824		data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
   2825		data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
   2826	}
   2827
   2828	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
   2829		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
   2830
   2831	return 0;
   2832}
   2833
   2834static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
   2835{
   2836	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
   2837	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
   2838	kfree(hwmgr->backend);
   2839	hwmgr->backend = NULL;
   2840
   2841	return 0;
   2842}
   2843
   2844static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
   2845{
   2846	uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
   2847	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2848	int i;
   2849
   2850	if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
   2851		for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
   2852			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
   2853			if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
   2854								virtual_voltage_id,
   2855								efuse_voltage_id) == 0) {
   2856				if (vddc != 0 && vddc != virtual_voltage_id) {
   2857					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
   2858					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
   2859					data->vddc_leakage.count++;
   2860				}
   2861				if (vddci != 0 && vddci != virtual_voltage_id) {
   2862					data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
   2863					data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
   2864					data->vddci_leakage.count++;
   2865				}
   2866			}
   2867		}
   2868	}
   2869	return 0;
   2870}
   2871
   2872#define LEAKAGE_ID_MSB			463
   2873#define LEAKAGE_ID_LSB			454
   2874
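/*
 * Select between the "Hi" and "Lo" EDC DIDT tables using the part's
 * leakage ID fused into efuse bits 454..463: readings below the VBIOS
 * threshold use the Lo DPM7 table offset, higher-leakage parts the Hi one.
 */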
   2875static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
   2876{
   2877	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2878	uint32_t efuse;
   2879	uint16_t offset;
   2880	int ret = 0;
   2881
   2882	if (data->disable_edc_leakage_controller)
   2883		return 0;
   2884
   2885	ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
   2886							 &data->edc_hilo_leakage_offset_from_vbios);
   2887	if (ret)
   2888		return ret;
   2889
   2890	if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
   2891	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
   2892		atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
   2893		if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
   2894			offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
   2895		else
   2896			offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
   2897
   2898		ret = atomctrl_get_edc_leakage_table(hwmgr,
   2899						     &data->edc_leakage_table,
   2900						     offset);
   2901		if (ret)
   2902			return ret;
   2903	}
   2904
   2905	return ret;
   2906}
   2907
   2908static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
   2909{
   2910	struct smu7_hwmgr *data;
   2911	int result = 0;
   2912
   2913	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
   2914	if (data == NULL)
   2915		return -ENOMEM;
   2916
   2917	hwmgr->backend = data;
   2918	smu7_patch_voltage_workaround(hwmgr);
   2919	smu7_init_dpm_defaults(hwmgr);
   2920
   2921	/* Get leakage voltage based on leakage ID. */
   2922	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   2923			PHM_PlatformCaps_EVV)) {
   2924		result = smu7_get_evv_voltages(hwmgr);
   2925		if (result) {
    2926			pr_info("Get EVV voltage failed, aborting driver load!\n");
   2927			return -EINVAL;
   2928		}
   2929	} else {
   2930		smu7_get_elb_voltages(hwmgr);
   2931	}
   2932
   2933	if (hwmgr->pp_table_version == PP_TABLE_V1) {
   2934		smu7_complete_dependency_tables(hwmgr);
   2935		smu7_set_private_data_based_on_pptable_v1(hwmgr);
   2936	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
   2937		smu7_patch_dependency_tables_with_leakage(hwmgr);
   2938		smu7_set_private_data_based_on_pptable_v0(hwmgr);
   2939	}
   2940
    2941	/* Initialize Dynamic State Adjustment Rule Settings */
   2942	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
   2943
   2944	if (0 == result) {
   2945		struct amdgpu_device *adev = hwmgr->adev;
   2946
   2947		data->is_tlu_enabled = false;
   2948
   2949		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
   2950							SMU7_MAX_HARDWARE_POWERLEVELS;
   2951		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
   2952		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
   2953
   2954		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
   2955		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
   2956			data->pcie_spc_cap = 20;
   2957		else
   2958			data->pcie_spc_cap = 16;
   2959		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
   2960
   2961		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
    2962		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */
   2963		hwmgr->platform_descriptor.clockStep.engineClock = 500;
   2964		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
   2965		smu7_thermal_parameter_init(hwmgr);
   2966	} else {
    2967		/* Ignore the return value here; we are cleaning up after a failure. */
   2968		smu7_hwmgr_backend_fini(hwmgr);
   2969	}
   2970
   2971	result = smu7_update_edc_leakage_table(hwmgr);
   2972	if (result)
   2973		return result;
   2974
   2975	return 0;
   2976}
   2977
   2978static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
   2979{
   2980	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   2981	uint32_t level, tmp;
   2982
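	/* For each DPM domain, compute the index of the highest enabled level:
	 * the shift loop counts the position of the top set bit in the enable
	 * mask, e.g. a mask of 0b0110 gives level 2.
	 */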
   2983	if (!data->pcie_dpm_key_disabled) {
   2984		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
   2985			level = 0;
   2986			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
   2987			while (tmp >>= 1)
   2988				level++;
   2989
   2990			if (level)
   2991				smum_send_msg_to_smc_with_parameter(hwmgr,
   2992						PPSMC_MSG_PCIeDPM_ForceLevel, level,
   2993						NULL);
   2994		}
   2995	}
   2996
   2997	if (!data->sclk_dpm_key_disabled) {
   2998		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
   2999			level = 0;
   3000			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
   3001			while (tmp >>= 1)
   3002				level++;
   3003
   3004			if (level)
   3005				smum_send_msg_to_smc_with_parameter(hwmgr,
   3006						PPSMC_MSG_SCLKDPM_SetEnabledMask,
   3007						(1 << level),
   3008						NULL);
   3009		}
   3010	}
   3011
   3012	if (!data->mclk_dpm_key_disabled) {
   3013		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
   3014			level = 0;
   3015			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
   3016			while (tmp >>= 1)
   3017				level++;
   3018
   3019			if (level)
   3020				smum_send_msg_to_smc_with_parameter(hwmgr,
   3021						PPSMC_MSG_MCLKDPM_SetEnabledMask,
   3022						(1 << level),
   3023						NULL);
   3024		}
   3025	}
   3026
   3027	return 0;
   3028}
   3029
   3030static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
   3031{
   3032	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3033
   3034	if (hwmgr->pp_table_version == PP_TABLE_V1)
   3035		phm_apply_dal_min_voltage_request(hwmgr);
    3036	/* TODO: handle v0 (Iceland and CI) */
   3037
   3038	if (!data->sclk_dpm_key_disabled) {
   3039		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
   3040			smum_send_msg_to_smc_with_parameter(hwmgr,
   3041					PPSMC_MSG_SCLKDPM_SetEnabledMask,
   3042					data->dpm_level_enable_mask.sclk_dpm_enable_mask,
   3043					NULL);
   3044	}
   3045
   3046	if (!data->mclk_dpm_key_disabled) {
   3047		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
   3048			smum_send_msg_to_smc_with_parameter(hwmgr,
   3049					PPSMC_MSG_MCLKDPM_SetEnabledMask,
   3050					data->dpm_level_enable_mask.mclk_dpm_enable_mask,
   3051					NULL);
   3052	}
   3053
   3054	return 0;
   3055}
   3056
   3057static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
   3058{
   3059	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3060
   3061	if (!smum_is_dpm_running(hwmgr))
   3062		return -EINVAL;
   3063
   3064	if (!data->pcie_dpm_key_disabled) {
   3065		smum_send_msg_to_smc(hwmgr,
   3066				PPSMC_MSG_PCIeDPM_UnForceLevel,
   3067				NULL);
   3068	}
   3069
   3070	return smu7_upload_dpm_level_enable_mask(hwmgr);
   3071}
   3072
   3073static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
   3074{
   3075	struct smu7_hwmgr *data =
   3076			(struct smu7_hwmgr *)(hwmgr->backend);
   3077	uint32_t level;
   3078
    3079	if (!data->sclk_dpm_key_disabled) {
    3080		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
    3081			level = phm_get_lowest_enabled_level(hwmgr,
    3082							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
    3083			smum_send_msg_to_smc_with_parameter(hwmgr,
    3084							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
    3085							    (1 << level),
    3086							    NULL);
    3087		}
    3088	}
   3089
   3090	if (!data->mclk_dpm_key_disabled) {
   3091		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
   3092			level = phm_get_lowest_enabled_level(hwmgr,
   3093							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
   3094			smum_send_msg_to_smc_with_parameter(hwmgr,
   3095							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
   3096							    (1 << level),
   3097							    NULL);
   3098		}
   3099	}
   3100
   3101	if (!data->pcie_dpm_key_disabled) {
   3102		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
   3103			level = phm_get_lowest_enabled_level(hwmgr,
   3104							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
   3105			smum_send_msg_to_smc_with_parameter(hwmgr,
   3106							    PPSMC_MSG_PCIeDPM_ForceLevel,
   3107							    (level),
   3108							    NULL);
   3109		}
   3110	}
   3111
   3112	return 0;
   3113}
   3114
   3115static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
   3116				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
   3117{
   3118	uint32_t percentage;
   3119	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3120	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
   3121	int32_t tmp_mclk;
   3122	int32_t tmp_sclk;
   3123	int32_t count;
   3124
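	/* Derive the profiling SCLK from the second-highest MCLK, scaled by
	 * the golden table's top SCLK:MCLK ratio; with a single MCLK level,
	 * a fixed 70% of that level is used instead.
	 */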
   3125	if (golden_dpm_table->mclk_table.count < 1)
   3126		return -EINVAL;
   3127
   3128	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
   3129			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
   3130
   3131	if (golden_dpm_table->mclk_table.count == 1) {
   3132		percentage = 70;
   3133		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
   3134		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
   3135	} else {
   3136		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
   3137		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
   3138	}
   3139
   3140	tmp_sclk = tmp_mclk * percentage / 100;
   3141
   3142	if (hwmgr->pp_table_version == PP_TABLE_V0) {
   3143		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
   3144			count >= 0; count--) {
   3145			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
   3146				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
   3147				*sclk_mask = count;
   3148				break;
   3149			}
   3150		}
   3151		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
   3152			*sclk_mask = 0;
   3153			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
   3154		}
   3155
   3156		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
   3157			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
   3158	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
   3159		struct phm_ppt_v1_information *table_info =
   3160				(struct phm_ppt_v1_information *)(hwmgr->pptable);
   3161
   3162		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
   3163			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
   3164				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
   3165				*sclk_mask = count;
   3166				break;
   3167			}
   3168		}
   3169		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
   3170			*sclk_mask = 0;
   3171			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
   3172		}
   3173
   3174		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
   3175			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
   3176	}
   3177
   3178	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
   3179		*mclk_mask = 0;
   3180	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
   3181		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
   3182
   3183	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
   3184	hwmgr->pstate_sclk = tmp_sclk;
   3185	hwmgr->pstate_mclk = tmp_mclk;
   3186
   3187	return 0;
   3188}
   3189
   3190static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
   3191				enum amd_dpm_forced_level level)
   3192{
   3193	int ret = 0;
   3194	uint32_t sclk_mask = 0;
   3195	uint32_t mclk_mask = 0;
   3196	uint32_t pcie_mask = 0;
   3197
   3198	if (hwmgr->pstate_sclk == 0)
   3199		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
   3200
   3201	switch (level) {
   3202	case AMD_DPM_FORCED_LEVEL_HIGH:
   3203		ret = smu7_force_dpm_highest(hwmgr);
   3204		break;
   3205	case AMD_DPM_FORCED_LEVEL_LOW:
   3206		ret = smu7_force_dpm_lowest(hwmgr);
   3207		break;
   3208	case AMD_DPM_FORCED_LEVEL_AUTO:
   3209		ret = smu7_unforce_dpm_levels(hwmgr);
   3210		break;
   3211	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
   3212	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
   3213	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
   3214	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
   3215		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
   3216		if (ret)
   3217			return ret;
    3218		smu7_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
    3219		smu7_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
    3220		smu7_force_clock_level(hwmgr, PP_PCIE, 1 << pcie_mask);
   3221		break;
   3222	case AMD_DPM_FORCED_LEVEL_MANUAL:
   3223	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
   3224	default:
   3225		break;
   3226	}
   3227
   3228	if (!ret) {
   3229		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
   3230			smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
   3231		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
   3232			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
   3233	}
   3234	return ret;
   3235}
   3236
   3237static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
   3238{
   3239	return sizeof(struct smu7_power_state);
   3240}
   3241
   3242static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
   3243				 uint32_t vblank_time_us)
   3244{
   3245	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3246	uint32_t switch_limit_us;
   3247
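	/* switch_limit_us is the minimum vblank duration, in microseconds,
	 * needed to hide an MCLK switch; it depends on the ASIC and on
	 * whether the board uses GDDR5.
	 */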
   3248	switch (hwmgr->chip_id) {
   3249	case CHIP_POLARIS10:
   3250	case CHIP_POLARIS11:
   3251	case CHIP_POLARIS12:
   3252		if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
   3253			switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
   3254		else
   3255			switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
   3256		break;
   3257	case CHIP_VEGAM:
   3258		switch_limit_us = 30;
   3259		break;
   3260	default:
   3261		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
   3262		break;
   3263	}
   3264
    3265	return vblank_time_us < switch_limit_us;
   3269}
   3270
   3271static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
   3272				struct pp_power_state *request_ps,
   3273			const struct pp_power_state *current_ps)
   3274{
   3275	struct amdgpu_device *adev = hwmgr->adev;
   3276	struct smu7_power_state *smu7_ps =
   3277				cast_phw_smu7_power_state(&request_ps->hardware);
   3278	uint32_t sclk;
   3279	uint32_t mclk;
   3280	struct PP_Clocks minimum_clocks = {0};
   3281	bool disable_mclk_switching;
   3282	bool disable_mclk_switching_for_frame_lock;
   3283	bool disable_mclk_switching_for_display;
   3284	const struct phm_clock_and_voltage_limits *max_limits;
   3285	uint32_t i;
   3286	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3287	struct phm_ppt_v1_information *table_info =
   3288			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   3289	int32_t count;
   3290	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
   3291	uint32_t latency;
   3292	bool latency_allowed = false;
   3293
   3294	data->battery_state = (PP_StateUILabel_Battery ==
   3295			request_ps->classification.ui_label);
   3296	data->mclk_ignore_signal = false;
   3297
   3298	max_limits = adev->pm.ac_power ?
   3299			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
   3300			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
   3301
    3302	/* Cap the clock DPM tables at the DC limits when running on DC power. */
   3303	if (!adev->pm.ac_power) {
   3304		for (i = 0; i < smu7_ps->performance_level_count; i++) {
   3305			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
   3306				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
   3307			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
   3308				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
   3309		}
   3310	}
   3311
   3312	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
   3313	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
   3314
   3315	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   3316			PHM_PlatformCaps_StablePState)) {
   3317		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
   3318		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
   3319
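		/* Snap the 75%-of-max target down to the nearest level in the
		 * SCLK/voltage dependency table.
		 */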
   3320		for (count = table_info->vdd_dep_on_sclk->count - 1;
   3321				count >= 0; count--) {
   3322			if (stable_pstate_sclk >=
   3323					table_info->vdd_dep_on_sclk->entries[count].clk) {
   3324				stable_pstate_sclk =
   3325						table_info->vdd_dep_on_sclk->entries[count].clk;
   3326				break;
   3327			}
   3328		}
   3329
   3330		if (count < 0)
   3331			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
   3332
   3333		stable_pstate_mclk = max_limits->mclk;
   3334
   3335		minimum_clocks.engineClock = stable_pstate_sclk;
   3336		minimum_clocks.memoryClock = stable_pstate_mclk;
   3337	}
   3338
   3339	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
   3340				    hwmgr->platform_descriptor.platformCaps,
   3341				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
   3342
   3343	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
   3344						!hwmgr->display_config->multi_monitor_in_sync) ||
   3345						(hwmgr->display_config->num_display &&
   3346						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
   3347
   3348	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
   3349					 disable_mclk_switching_for_display;
   3350
   3351	if (hwmgr->display_config->num_display == 0) {
   3352		if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
   3353			data->mclk_ignore_signal = true;
   3354		else
   3355			disable_mclk_switching = false;
   3356	}
   3357
   3358	sclk = smu7_ps->performance_levels[0].engine_clock;
   3359	mclk = smu7_ps->performance_levels[0].memory_clock;
   3360
   3361	if (disable_mclk_switching &&
   3362	    (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
   3363	    hwmgr->chip_id <= CHIP_VEGAM)))
   3364		mclk = smu7_ps->performance_levels
   3365		[smu7_ps->performance_level_count - 1].memory_clock;
   3366
   3367	if (sclk < minimum_clocks.engineClock)
   3368		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
   3369				max_limits->sclk : minimum_clocks.engineClock;
   3370
   3371	if (mclk < minimum_clocks.memoryClock)
   3372		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
   3373				max_limits->mclk : minimum_clocks.memoryClock;
   3374
   3375	smu7_ps->performance_levels[0].engine_clock = sclk;
   3376	smu7_ps->performance_levels[0].memory_clock = mclk;
   3377
   3378	smu7_ps->performance_levels[1].engine_clock =
   3379		(smu7_ps->performance_levels[1].engine_clock >=
   3380				smu7_ps->performance_levels[0].engine_clock) ?
   3381						smu7_ps->performance_levels[1].engine_clock :
   3382						smu7_ps->performance_levels[0].engine_clock;
   3383
   3384	if (disable_mclk_switching) {
   3385		if (mclk < smu7_ps->performance_levels[1].memory_clock)
   3386			mclk = smu7_ps->performance_levels[1].memory_clock;
   3387
   3388		if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
   3389			if (disable_mclk_switching_for_display) {
   3390				/* Find the lowest MCLK frequency that is within
   3391				 * the tolerable latency defined in DAL
   3392				 */
   3393				latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
   3394				for (i = 0; i < data->mclk_latency_table.count; i++) {
   3395					if (data->mclk_latency_table.entries[i].latency <= latency) {
   3396						latency_allowed = true;
   3397
   3398						if ((data->mclk_latency_table.entries[i].frequency >=
   3399								smu7_ps->performance_levels[0].memory_clock) &&
   3400						    (data->mclk_latency_table.entries[i].frequency <=
   3401								smu7_ps->performance_levels[1].memory_clock)) {
   3402							mclk = data->mclk_latency_table.entries[i].frequency;
   3403							break;
   3404						}
   3405					}
   3406				}
   3407				if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
   3408					data->mclk_ignore_signal = true;
   3409				} else {
   3410					data->mclk_ignore_signal = false;
   3411				}
   3412			}
   3413
   3414			if (disable_mclk_switching_for_frame_lock)
   3415				mclk = smu7_ps->performance_levels[1].memory_clock;
   3416		}
   3417
   3418		smu7_ps->performance_levels[0].memory_clock = mclk;
   3419
   3420		if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
   3421		      hwmgr->chip_id <= CHIP_VEGAM))
   3422			smu7_ps->performance_levels[1].memory_clock = mclk;
   3423	} else {
   3424		if (smu7_ps->performance_levels[1].memory_clock <
   3425				smu7_ps->performance_levels[0].memory_clock)
   3426			smu7_ps->performance_levels[1].memory_clock =
   3427					smu7_ps->performance_levels[0].memory_clock;
   3428	}
   3429
   3430	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   3431			PHM_PlatformCaps_StablePState)) {
   3432		for (i = 0; i < smu7_ps->performance_level_count; i++) {
   3433			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
   3434			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
   3435			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
    3436			smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
   3437		}
   3438	}
   3439	return 0;
   3440}
   3441
   3442
   3443static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
   3444{
   3445	struct pp_power_state  *ps;
   3446	struct smu7_power_state  *smu7_ps;
   3447
   3448	if (hwmgr == NULL)
   3449		return -EINVAL;
   3450
   3451	ps = hwmgr->request_ps;
   3452
   3453	if (ps == NULL)
   3454		return -EINVAL;
   3455
   3456	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
   3457
   3458	if (low)
   3459		return smu7_ps->performance_levels[0].memory_clock;
   3460	else
   3461		return smu7_ps->performance_levels
   3462				[smu7_ps->performance_level_count-1].memory_clock;
   3463}
   3464
   3465static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
   3466{
   3467	struct pp_power_state  *ps;
   3468	struct smu7_power_state  *smu7_ps;
   3469
   3470	if (hwmgr == NULL)
   3471		return -EINVAL;
   3472
   3473	ps = hwmgr->request_ps;
   3474
   3475	if (ps == NULL)
   3476		return -EINVAL;
   3477
   3478	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
   3479
   3480	if (low)
   3481		return smu7_ps->performance_levels[0].engine_clock;
   3482	else
   3483		return smu7_ps->performance_levels
   3484				[smu7_ps->performance_level_count-1].engine_clock;
   3485}
   3486
   3487static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
   3488					struct pp_hw_power_state *hw_ps)
   3489{
   3490	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3491	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
   3492	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
   3493	uint16_t size;
   3494	uint8_t frev, crev;
   3495	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
   3496
   3497	/* First retrieve the Boot clocks and VDDC from the firmware info table.
   3498	 * We assume here that fw_info is unchanged if this call fails.
   3499	 */
   3500	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
   3501			&size, &frev, &crev);
   3502	if (!fw_info)
   3503		/* During a test, there is no firmware info table. */
   3504		return 0;
   3505
   3506	/* Patch the state. */
   3507	data->vbios_boot_state.sclk_bootup_value =
   3508			le32_to_cpu(fw_info->ulDefaultEngineClock);
   3509	data->vbios_boot_state.mclk_bootup_value =
   3510			le32_to_cpu(fw_info->ulDefaultMemoryClock);
   3511	data->vbios_boot_state.mvdd_bootup_value =
   3512			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
   3513	data->vbios_boot_state.vddc_bootup_value =
   3514			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
   3515	data->vbios_boot_state.vddci_bootup_value =
   3516			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
   3517	data->vbios_boot_state.pcie_gen_bootup_value =
   3518			smu7_get_current_pcie_speed(hwmgr);
   3519
   3520	data->vbios_boot_state.pcie_lane_bootup_value =
   3521			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
   3522
   3523	/* set boot power state */
   3524	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
   3525	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
   3526	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
   3527	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
   3528
   3529	return 0;
   3530}
   3531
   3532static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
   3533{
   3534	int result;
   3535	unsigned long ret = 0;
   3536
   3537	if (hwmgr->pp_table_version == PP_TABLE_V0) {
   3538		result = pp_tables_get_num_of_entries(hwmgr, &ret);
   3539		return result ? 0 : ret;
   3540	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
   3541		result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
   3542		return result;
   3543	}
   3544	return 0;
   3545}
   3546
   3547static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
   3548		void *state, struct pp_power_state *power_state,
   3549		void *pp_table, uint32_t classification_flag)
   3550{
   3551	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3552	struct smu7_power_state  *smu7_power_state =
   3553			(struct smu7_power_state *)(&(power_state->hardware));
   3554	struct smu7_performance_level *performance_level;
   3555	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
   3556	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
   3557			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
   3558	PPTable_Generic_SubTable_Header *sclk_dep_table =
   3559			(PPTable_Generic_SubTable_Header *)
   3560			(((unsigned long)powerplay_table) +
   3561				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
   3562
   3563	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
   3564			(ATOM_Tonga_MCLK_Dependency_Table *)
   3565			(((unsigned long)powerplay_table) +
   3566				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
   3567
    3568	/* The following fields are not initialized here: id, orderedList, allStatesList. */
   3569	power_state->classification.ui_label =
   3570			(le16_to_cpu(state_entry->usClassification) &
   3571			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
   3572			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
   3573	power_state->classification.flags = classification_flag;
   3574	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
   3575
   3576	power_state->classification.temporary_state = false;
   3577	power_state->classification.to_be_deleted = false;
   3578
   3579	power_state->validation.disallowOnDC =
   3580			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
   3581					ATOM_Tonga_DISALLOW_ON_DC));
   3582
   3583	power_state->pcie.lanes = 0;
   3584
   3585	power_state->display.disableFrameModulation = false;
   3586	power_state->display.limitRefreshrate = false;
   3587	power_state->display.enableVariBright =
   3588			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
   3589					ATOM_Tonga_ENABLE_VARIBRIGHT));
   3590
   3591	power_state->validation.supportedPowerLevels = 0;
   3592	power_state->uvd_clocks.VCLK = 0;
   3593	power_state->uvd_clocks.DCLK = 0;
   3594	power_state->temperatures.min = 0;
   3595	power_state->temperatures.max = 0;
   3596
   3597	performance_level = &(smu7_power_state->performance_levels
   3598			[smu7_power_state->performance_level_count++]);
   3599
   3600	PP_ASSERT_WITH_CODE(
   3601			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
    3602			"Number of performance levels exceeds the SMC limit!",
   3603			return -EINVAL);
   3604
   3605	PP_ASSERT_WITH_CODE(
   3606			(smu7_power_state->performance_level_count <=
   3607					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
    3608			"Number of performance levels exceeds the driver limit!",
   3609			return -EINVAL);
   3610
   3611	/* Performance levels are arranged from low to high. */
   3612	performance_level->memory_clock = mclk_dep_table->entries
   3613			[state_entry->ucMemoryClockIndexLow].ulMclk;
   3614	if (sclk_dep_table->ucRevId == 0)
   3615		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
   3616			[state_entry->ucEngineClockIndexLow].ulSclk;
   3617	else if (sclk_dep_table->ucRevId == 1)
   3618		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
   3619			[state_entry->ucEngineClockIndexLow].ulSclk;
   3620	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
   3621			state_entry->ucPCIEGenLow);
   3622	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
   3623			state_entry->ucPCIELaneLow);
   3624
   3625	performance_level = &(smu7_power_state->performance_levels
   3626			[smu7_power_state->performance_level_count++]);
   3627	performance_level->memory_clock = mclk_dep_table->entries
   3628			[state_entry->ucMemoryClockIndexHigh].ulMclk;
   3629
   3630	if (sclk_dep_table->ucRevId == 0)
   3631		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
   3632			[state_entry->ucEngineClockIndexHigh].ulSclk;
   3633	else if (sclk_dep_table->ucRevId == 1)
   3634		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
   3635			[state_entry->ucEngineClockIndexHigh].ulSclk;
   3636
   3637	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
   3638			state_entry->ucPCIEGenHigh);
   3639	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
   3640			state_entry->ucPCIELaneHigh);
   3641
   3642	return 0;
   3643}
   3644
   3645static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
   3646		unsigned long entry_index, struct pp_power_state *state)
   3647{
   3648	int result;
   3649	struct smu7_power_state *ps;
   3650	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3651	struct phm_ppt_v1_information *table_info =
   3652			(struct phm_ppt_v1_information *)(hwmgr->pptable);
   3653	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
   3654			table_info->vdd_dep_on_mclk;
   3655
   3656	state->hardware.magic = PHM_VIslands_Magic;
   3657
   3658	ps = (struct smu7_power_state *)(&state->hardware);
   3659
   3660	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
   3661			smu7_get_pp_table_entry_callback_func_v1);
   3662
    3663	/* This is the earliest point at which we have both the dependency tables and
    3664	 * the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry retrieves it.
    3665	 * If there is only one VDDCI/MCLK level, check that it matches the VBIOS boot state.
    3666	 */
   3667	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
   3668		if (dep_mclk_table->entries[0].clk !=
   3669				data->vbios_boot_state.mclk_bootup_value)
   3670			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
   3671					"does not match VBIOS boot MCLK level");
   3672		if (dep_mclk_table->entries[0].vddci !=
   3673				data->vbios_boot_state.vddci_bootup_value)
   3674			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
   3675					"does not match VBIOS boot VDDCI level");
   3676	}
   3677
   3678	/* set DC compatible flag if this state supports DC */
   3679	if (!state->validation.disallowOnDC)
   3680		ps->dc_compatible = true;
   3681
   3682	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
   3683		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
   3684
   3685	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
   3686	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
   3687
   3688	if (!result) {
   3689		uint32_t i;
   3690
   3691		switch (state->classification.ui_label) {
   3692		case PP_StateUILabel_Performance:
   3693			data->use_pcie_performance_levels = true;
   3694			for (i = 0; i < ps->performance_level_count; i++) {
   3695				if (data->pcie_gen_performance.max <
   3696						ps->performance_levels[i].pcie_gen)
   3697					data->pcie_gen_performance.max =
   3698							ps->performance_levels[i].pcie_gen;
   3699
   3700				if (data->pcie_gen_performance.min >
   3701						ps->performance_levels[i].pcie_gen)
   3702					data->pcie_gen_performance.min =
   3703							ps->performance_levels[i].pcie_gen;
   3704
   3705				if (data->pcie_lane_performance.max <
   3706						ps->performance_levels[i].pcie_lane)
   3707					data->pcie_lane_performance.max =
   3708							ps->performance_levels[i].pcie_lane;
   3709				if (data->pcie_lane_performance.min >
   3710						ps->performance_levels[i].pcie_lane)
   3711					data->pcie_lane_performance.min =
   3712							ps->performance_levels[i].pcie_lane;
   3713			}
   3714			break;
   3715		case PP_StateUILabel_Battery:
   3716			data->use_pcie_power_saving_levels = true;
   3717
   3718			for (i = 0; i < ps->performance_level_count; i++) {
   3719				if (data->pcie_gen_power_saving.max <
   3720						ps->performance_levels[i].pcie_gen)
   3721					data->pcie_gen_power_saving.max =
   3722							ps->performance_levels[i].pcie_gen;
   3723
   3724				if (data->pcie_gen_power_saving.min >
   3725						ps->performance_levels[i].pcie_gen)
   3726					data->pcie_gen_power_saving.min =
   3727							ps->performance_levels[i].pcie_gen;
   3728
   3729				if (data->pcie_lane_power_saving.max <
   3730						ps->performance_levels[i].pcie_lane)
   3731					data->pcie_lane_power_saving.max =
   3732							ps->performance_levels[i].pcie_lane;
   3733
   3734				if (data->pcie_lane_power_saving.min >
   3735						ps->performance_levels[i].pcie_lane)
   3736					data->pcie_lane_power_saving.min =
   3737							ps->performance_levels[i].pcie_lane;
   3738			}
   3739			break;
   3740		default:
   3741			break;
   3742		}
   3743	}
   3744	return 0;
   3745}
   3746
   3747static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
   3748					struct pp_hw_power_state *power_state,
   3749					unsigned int index, const void *clock_info)
   3750{
   3751	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3752	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
   3753	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
   3754	struct smu7_performance_level *performance_level;
   3755	uint32_t engine_clock, memory_clock;
   3756	uint16_t pcie_gen_from_bios;
   3757
   3758	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
   3759	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
   3760
   3761	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
   3762		data->highest_mclk = memory_clock;
   3763
   3764	PP_ASSERT_WITH_CODE(
   3765			(ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
    3766			"Number of performance levels exceeds the SMC limit!",
   3767			return -EINVAL);
   3768
   3769	PP_ASSERT_WITH_CODE(
   3770			(ps->performance_level_count <
   3771					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
    3772			"Number of performance levels exceeds the driver limit, skipping!",
   3773			return 0);
   3774
   3775	performance_level = &(ps->performance_levels
   3776			[ps->performance_level_count++]);
   3777
   3778	/* Performance levels are arranged from low to high. */
   3779	performance_level->memory_clock = memory_clock;
   3780	performance_level->engine_clock = engine_clock;
   3781
   3782	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
   3783
   3784	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
   3785	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
   3786
   3787	return 0;
   3788}
   3789
   3790static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
   3791		unsigned long entry_index, struct pp_power_state *state)
   3792{
   3793	int result;
   3794	struct smu7_power_state *ps;
   3795	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3796	struct phm_clock_voltage_dependency_table *dep_mclk_table =
   3797			hwmgr->dyn_state.vddci_dependency_on_mclk;
   3798
   3799	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
   3800
   3801	state->hardware.magic = PHM_VIslands_Magic;
   3802
   3803	ps = (struct smu7_power_state *)(&state->hardware);
   3804
   3805	result = pp_tables_get_entry(hwmgr, entry_index, state,
   3806			smu7_get_pp_table_entry_callback_func_v0);
   3807
    3808	/*
    3809	 * This is the earliest point at which we have both the dependency
    3810	 * table and the VBIOS boot state, since
    3811	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
    3812	 * If there is only one VDDCI/MCLK level, check that it matches the
    3813	 * VBIOS boot state.
    3814	 */
   3815	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
   3816		if (dep_mclk_table->entries[0].clk !=
   3817				data->vbios_boot_state.mclk_bootup_value)
   3818			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
   3819					"does not match VBIOS boot MCLK level");
   3820		if (dep_mclk_table->entries[0].v !=
   3821				data->vbios_boot_state.vddci_bootup_value)
   3822			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
   3823					"does not match VBIOS boot VDDCI level");
   3824	}
   3825
   3826	/* set DC compatible flag if this state supports DC */
   3827	if (!state->validation.disallowOnDC)
   3828		ps->dc_compatible = true;
   3829
   3830	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
   3831		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
   3832
   3833	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
   3834	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
   3835
   3836	if (!result) {
   3837		uint32_t i;
   3838
   3839		switch (state->classification.ui_label) {
   3840		case PP_StateUILabel_Performance:
   3841			data->use_pcie_performance_levels = true;
   3842
   3843			for (i = 0; i < ps->performance_level_count; i++) {
   3844				if (data->pcie_gen_performance.max <
   3845						ps->performance_levels[i].pcie_gen)
   3846					data->pcie_gen_performance.max =
   3847							ps->performance_levels[i].pcie_gen;
   3848
   3849				if (data->pcie_gen_performance.min >
   3850						ps->performance_levels[i].pcie_gen)
   3851					data->pcie_gen_performance.min =
   3852							ps->performance_levels[i].pcie_gen;
   3853
   3854				if (data->pcie_lane_performance.max <
   3855						ps->performance_levels[i].pcie_lane)
   3856					data->pcie_lane_performance.max =
   3857							ps->performance_levels[i].pcie_lane;
   3858
   3859				if (data->pcie_lane_performance.min >
   3860						ps->performance_levels[i].pcie_lane)
   3861					data->pcie_lane_performance.min =
   3862							ps->performance_levels[i].pcie_lane;
   3863			}
   3864			break;
   3865		case PP_StateUILabel_Battery:
   3866			data->use_pcie_power_saving_levels = true;
   3867
   3868			for (i = 0; i < ps->performance_level_count; i++) {
   3869				if (data->pcie_gen_power_saving.max <
   3870						ps->performance_levels[i].pcie_gen)
   3871					data->pcie_gen_power_saving.max =
   3872							ps->performance_levels[i].pcie_gen;
   3873
   3874				if (data->pcie_gen_power_saving.min >
   3875						ps->performance_levels[i].pcie_gen)
   3876					data->pcie_gen_power_saving.min =
   3877							ps->performance_levels[i].pcie_gen;
   3878
   3879				if (data->pcie_lane_power_saving.max <
   3880						ps->performance_levels[i].pcie_lane)
   3881					data->pcie_lane_power_saving.max =
   3882							ps->performance_levels[i].pcie_lane;
   3883
   3884				if (data->pcie_lane_power_saving.min >
   3885						ps->performance_levels[i].pcie_lane)
   3886					data->pcie_lane_power_saving.min =
   3887							ps->performance_levels[i].pcie_lane;
   3888			}
   3889			break;
   3890		default:
   3891			break;
   3892		}
   3893	}
   3894	return 0;
   3895}
   3896
   3897static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
   3898		unsigned long entry_index, struct pp_power_state *state)
   3899{
   3900	if (hwmgr->pp_table_version == PP_TABLE_V0)
   3901		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
   3902	else if (hwmgr->pp_table_version == PP_TABLE_V1)
   3903		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
   3904
   3905	return 0;
   3906}
   3907
   3908static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
   3909{
   3910	struct amdgpu_device *adev = hwmgr->adev;
   3911	int i;
   3912	u32 tmp = 0;
   3913
   3914	if (!query)
   3915		return -EINVAL;
   3916
   3917	/*
   3918	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
   3919	 *  - Hawaii
   3920	 *  - Bonaire
   3921	 *  - Fiji
   3922	 *  - Tonga
   3923	 */
   3924	if ((adev->asic_type != CHIP_HAWAII) &&
   3925	    (adev->asic_type != CHIP_BONAIRE) &&
   3926	    (adev->asic_type != CHIP_FIJI) &&
   3927	    (adev->asic_type != CHIP_TONGA)) {
   3928		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
   3929		*query = tmp;
   3930
   3931		if (tmp != 0)
   3932			return 0;
   3933	}
   3934
   3935	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
   3936	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   3937							ixSMU_PM_STATUS_95, 0);
   3938
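	/* Fallback path: start the PM status log and poll the scratch
	 * register every 500 ms, up to ten times, until the firmware posts a
	 * non-zero power sample.
	 */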
   3939	for (i = 0; i < 10; i++) {
   3940		msleep(500);
   3941		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
   3942		tmp = cgs_read_ind_register(hwmgr->device,
   3943						CGS_IND_REG__SMC,
   3944						ixSMU_PM_STATUS_95);
   3945		if (tmp != 0)
   3946			break;
   3947	}
   3948	*query = tmp;
   3949
   3950	return 0;
   3951}
   3952
   3953static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
   3954			    void *value, int *size)
   3955{
   3956	uint32_t sclk, mclk, activity_percent;
   3957	uint32_t offset, val_vid;
   3958	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   3959
   3960	/* size must be at least 4 bytes for all sensors */
   3961	if (*size < 4)
   3962		return -EINVAL;
   3963
   3964	switch (idx) {
   3965	case AMDGPU_PP_SENSOR_GFX_SCLK:
   3966		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
   3967		*((uint32_t *)value) = sclk;
   3968		*size = 4;
   3969		return 0;
   3970	case AMDGPU_PP_SENSOR_GFX_MCLK:
   3971		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
   3972		*((uint32_t *)value) = mclk;
   3973		*size = 4;
   3974		return 0;
   3975	case AMDGPU_PP_SENSOR_GPU_LOAD:
   3976	case AMDGPU_PP_SENSOR_MEM_LOAD:
   3977		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
   3978								SMU_SoftRegisters,
   3979								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
   3980								AverageGraphicsActivity:
   3981								AverageMemoryActivity);
   3982
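		/* The activity reading appears to be an 8.8 fixed-point
		 * percentage: adding 0x80 rounds to the nearest integer before
		 * the shift, and the result is clamped to 100 below.
		 */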
   3983		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
   3984		activity_percent += 0x80;
   3985		activity_percent >>= 8;
   3986		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
   3987		*size = 4;
   3988		return 0;
   3989	case AMDGPU_PP_SENSOR_GPU_TEMP:
   3990		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
   3991		*size = 4;
   3992		return 0;
   3993	case AMDGPU_PP_SENSOR_UVD_POWER:
   3994		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
   3995		*size = 4;
   3996		return 0;
   3997	case AMDGPU_PP_SENSOR_VCE_POWER:
   3998		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
   3999		*size = 4;
   4000		return 0;
   4001	case AMDGPU_PP_SENSOR_GPU_POWER:
   4002		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
   4003	case AMDGPU_PP_SENSOR_VDDGFX:
   4004		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
   4005		    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
   4006			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
   4007					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
   4008		else
   4009			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
   4010					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
   4011
   4012		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
   4013		return 0;
   4014	default:
   4015		return -EOPNOTSUPP;
   4016	}
   4017}
   4018
   4019static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
   4020{
   4021	const struct phm_set_power_state_input *states =
   4022			(const struct phm_set_power_state_input *)input;
   4023	const struct smu7_power_state *smu7_ps =
   4024			cast_const_phw_smu7_power_state(states->pnew_state);
   4025	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4026	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
   4027	uint32_t sclk = smu7_ps->performance_levels
   4028			[smu7_ps->performance_level_count - 1].engine_clock;
   4029	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
   4030	uint32_t mclk = smu7_ps->performance_levels
   4031			[smu7_ps->performance_level_count - 1].memory_clock;
   4032	struct PP_Clocks min_clocks = {0};
   4033	uint32_t i;
   4034
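	/* Look up the requested top-level SCLK in the DPM table; if it is not
	 * found and exceeds the current top level, treat it as an overdrive
	 * request and patch the highest level in place.
	 */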
   4035	for (i = 0; i < sclk_table->count; i++) {
   4036		if (sclk == sclk_table->dpm_levels[i].value)
   4037			break;
   4038	}
   4039
   4040	if (i >= sclk_table->count) {
   4041		if (sclk > sclk_table->dpm_levels[i-1].value) {
   4042			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
   4043			sclk_table->dpm_levels[i-1].value = sclk;
   4044		}
   4045	} else {
   4046	/* TODO: Check SCLK in DAL's minimum clocks
   4047	 * in case DeepSleep divider update is required.
   4048	 */
   4049		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
   4050			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
   4051				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
   4052			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
   4053	}
   4054
   4055	for (i = 0; i < mclk_table->count; i++) {
   4056		if (mclk == mclk_table->dpm_levels[i].value)
   4057			break;
   4058	}
   4059
   4060	if (i >= mclk_table->count) {
   4061		if (mclk > mclk_table->dpm_levels[i-1].value) {
   4062			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
   4063			mclk_table->dpm_levels[i-1].value = mclk;
   4064		}
   4065	}
   4066
   4067	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
   4068		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
   4069
   4070	return 0;
   4071}
   4072
   4073static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
   4074		const struct smu7_power_state *smu7_ps)
   4075{
   4076	uint32_t i;
   4077	uint32_t sclk, max_sclk = 0;
   4078	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4079	struct smu7_dpm_table *dpm_table = &data->dpm_table;
   4080
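	/* The PCIe speed table parallels the SCLK table: take the PCIe level
	 * with the same index as the state's highest SCLK, clamped to the
	 * last PCIe entry.
	 */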
   4081	for (i = 0; i < smu7_ps->performance_level_count; i++) {
   4082		sclk = smu7_ps->performance_levels[i].engine_clock;
   4083		if (max_sclk < sclk)
   4084			max_sclk = sclk;
   4085	}
   4086
   4087	for (i = 0; i < dpm_table->sclk_table.count; i++) {
   4088		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
   4089			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
   4090					dpm_table->pcie_speed_table.dpm_levels
   4091					[dpm_table->pcie_speed_table.count - 1].value :
   4092					dpm_table->pcie_speed_table.dpm_levels[i].value);
   4093	}
   4094
   4095	return 0;
   4096}
   4097
   4098static int smu7_request_link_speed_change_before_state_change(
   4099		struct pp_hwmgr *hwmgr, const void *input)
   4100{
   4101	const struct phm_set_power_state_input *states =
   4102			(const struct phm_set_power_state_input *)input;
   4103	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4104	const struct smu7_power_state *smu7_nps =
   4105			cast_const_phw_smu7_power_state(states->pnew_state);
   4106	const struct smu7_power_state *polaris10_cps =
   4107			cast_const_phw_smu7_power_state(states->pcurrent_state);
   4108
   4109	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
   4110	uint16_t current_link_speed;
   4111
   4112	if (data->force_pcie_gen == PP_PCIEGenInvalid)
   4113		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
   4114	else
   4115		current_link_speed = data->force_pcie_gen;
   4116
   4117	data->force_pcie_gen = PP_PCIEGenInvalid;
   4118	data->pspp_notify_required = false;
   4119
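	/* Link-speed upgrades go through an ACPI PSPP performance request,
	 * falling back one generation at a time if a request is rejected;
	 * downgrades only set pspp_notify_required for handling after the
	 * state change.
	 */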
   4120	if (target_link_speed > current_link_speed) {
   4121		switch (target_link_speed) {
   4122#ifdef CONFIG_ACPI
   4123		case PP_PCIEGen3:
   4124			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
   4125				break;
   4126			data->force_pcie_gen = PP_PCIEGen2;
   4127			if (current_link_speed == PP_PCIEGen2)
   4128				break;
   4129			fallthrough;
   4130		case PP_PCIEGen2:
   4131			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
   4132				break;
   4133			fallthrough;
   4134#endif
   4135		default:
   4136			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
   4137			break;
   4138		}
   4139	} else {
   4140		if (target_link_speed < current_link_speed)
   4141			data->pspp_notify_required = true;
   4142	}
   4143
   4144	return 0;
   4145}
   4146
   4147static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
   4148{
   4149	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4150
   4151	if (0 == data->need_update_smu7_dpm_table)
   4152		return 0;
   4153
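	/* Freeze the SCLK/MCLK DPM state machines so the SMC does not switch
	 * levels while the DPM tables are being repopulated.
	 */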
   4154	if ((0 == data->sclk_dpm_key_disabled) &&
   4155		(data->need_update_smu7_dpm_table &
   4156			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
   4157		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   4158				"Trying to freeze SCLK DPM when DPM is disabled",
   4159				);
   4160		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
   4161				PPSMC_MSG_SCLKDPM_FreezeLevel,
   4162				NULL),
   4163				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
   4164				return -EINVAL);
   4165	}
   4166
   4167	if ((0 == data->mclk_dpm_key_disabled) &&
   4168		!data->mclk_ignore_signal &&
   4169		(data->need_update_smu7_dpm_table &
   4170		 DPMTABLE_OD_UPDATE_MCLK)) {
   4171		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   4172				"Trying to freeze MCLK DPM when DPM is disabled",
   4173				);
   4174		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
   4175				PPSMC_MSG_MCLKDPM_FreezeLevel,
   4176				NULL),
   4177				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
   4178				return -EINVAL);
   4179	}
   4180
   4181	return 0;
   4182}
   4183
   4184static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
   4185		struct pp_hwmgr *hwmgr, const void *input)
   4186{
   4187	int result = 0;
   4188	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4189	struct smu7_dpm_table *dpm_table = &data->dpm_table;
   4190	uint32_t count;
   4191	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
   4192	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
   4193	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
   4194
   4195	if (0 == data->need_update_smu7_dpm_table)
   4196		return 0;
   4197
   4198	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
   4199		for (count = 0; count < dpm_table->sclk_table.count; count++) {
   4200			dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
   4201			dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
   4202		}
   4203	}
   4204
   4205	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
   4206		for (count = 0; count < dpm_table->mclk_table.count; count++) {
   4207			dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
   4208			dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
   4209		}
   4210	}
   4211
   4212	if (data->need_update_smu7_dpm_table &
   4213			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
   4214		result = smum_populate_all_graphic_levels(hwmgr);
   4215		PP_ASSERT_WITH_CODE((0 == result),
   4216				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
   4217				return result);
   4218	}
   4219
   4220	if (data->need_update_smu7_dpm_table &
   4221			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
    4222		/* Populate the MCLK DPM table to the SMU7 firmware. */
   4223		result = smum_populate_all_memory_levels(hwmgr);
   4224		PP_ASSERT_WITH_CODE((0 == result),
   4225				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
   4226				return result);
   4227	}
   4228
   4229	return result;
   4230}
   4231
   4232static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
   4233			  struct smu7_single_dpm_table *dpm_table,
   4234			uint32_t low_limit, uint32_t high_limit)
   4235{
   4236	uint32_t i;
   4237
   4238	/* force the trim if mclk_switching is disabled to prevent flicker */
   4239	bool force_trim = (low_limit == high_limit);
   4240	for (i = 0; i < dpm_table->count; i++) {
    4241		/* skip the trim if OD is enabled */
   4242		if ((!hwmgr->od_enabled || force_trim)
   4243			&& (dpm_table->dpm_levels[i].value < low_limit
   4244			|| dpm_table->dpm_levels[i].value > high_limit))
   4245			dpm_table->dpm_levels[i].enabled = false;
   4246		else
   4247			dpm_table->dpm_levels[i].enabled = true;
   4248	}
   4249
   4250	return 0;
   4251}
   4252
   4253static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
   4254		const struct smu7_power_state *smu7_ps)
   4255{
   4256	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4257	uint32_t high_limit_count;
   4258
   4259	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
   4260			"power state did not have any performance level",
   4261			return -EINVAL);
   4262
   4263	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
   4264
   4265	smu7_trim_single_dpm_states(hwmgr,
   4266			&(data->dpm_table.sclk_table),
   4267			smu7_ps->performance_levels[0].engine_clock,
   4268			smu7_ps->performance_levels[high_limit_count].engine_clock);
   4269
   4270	smu7_trim_single_dpm_states(hwmgr,
   4271			&(data->dpm_table.mclk_table),
   4272			smu7_ps->performance_levels[0].memory_clock,
   4273			smu7_ps->performance_levels[high_limit_count].memory_clock);
   4274
   4275	return 0;
   4276}
   4277
   4278static int smu7_generate_dpm_level_enable_mask(
   4279		struct pp_hwmgr *hwmgr, const void *input)
   4280{
   4281	int result = 0;
   4282	const struct phm_set_power_state_input *states =
   4283			(const struct phm_set_power_state_input *)input;
   4284	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4285	const struct smu7_power_state *smu7_ps =
   4286			cast_const_phw_smu7_power_state(states->pnew_state);
   4287
   4288
   4289	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
   4290	if (result)
   4291		return result;
   4292
   4293	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
   4294			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
   4295	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
   4296			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
   4297	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
   4298			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
   4299
   4300	return 0;
   4301}
   4302
   4303static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
   4304{
   4305	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4306
   4307	if (0 == data->need_update_smu7_dpm_table)
   4308		return 0;
   4309
   4310	if ((0 == data->sclk_dpm_key_disabled) &&
   4311		(data->need_update_smu7_dpm_table &
    4312		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
   4313
   4314		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   4315				"Trying to Unfreeze SCLK DPM when DPM is disabled",
   4316				);
   4317		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
   4318				PPSMC_MSG_SCLKDPM_UnfreezeLevel,
   4319				NULL),
   4320			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
   4321			return -EINVAL);
   4322	}
   4323
   4324	if ((0 == data->mclk_dpm_key_disabled) &&
   4325		!data->mclk_ignore_signal &&
   4326		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
   4327
   4328		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   4329				"Trying to Unfreeze MCLK DPM when DPM is disabled",
   4330				);
   4331		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
   4332				PPSMC_MSG_MCLKDPM_UnfreezeLevel,
   4333				NULL),
   4334		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
   4335		    return -EINVAL);
   4336	}
   4337
   4338	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
   4339
   4340	return 0;
   4341}
   4342
   4343static int smu7_notify_link_speed_change_after_state_change(
   4344		struct pp_hwmgr *hwmgr, const void *input)
   4345{
   4346	const struct phm_set_power_state_input *states =
   4347			(const struct phm_set_power_state_input *)input;
   4348	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4349	const struct smu7_power_state *smu7_ps =
   4350			cast_const_phw_smu7_power_state(states->pnew_state);
   4351	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
   4352	uint8_t  request;
   4353
   4354	if (data->pspp_notify_required) {
   4355		if (target_link_speed == PP_PCIEGen3)
   4356			request = PCIE_PERF_REQ_GEN3;
   4357		else if (target_link_speed == PP_PCIEGen2)
   4358			request = PCIE_PERF_REQ_GEN2;
   4359		else
   4360			request = PCIE_PERF_REQ_GEN1;
   4361
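		/*
		 * Skip Gen1 requests while the link is still trained at
		 * Gen2 or Gen3 (a nonzero current speed index).
		 */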
   4362		if (request == PCIE_PERF_REQ_GEN1 &&
   4363				smu7_get_current_pcie_speed(hwmgr) > 0)
   4364			return 0;
   4365
   4366#ifdef CONFIG_ACPI
   4367		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
   4368			if (PP_PCIEGen2 == target_link_speed)
    4369				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!\n");
   4370			else
    4371				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!\n");
   4372		}
   4373#endif
   4374	}
   4375
   4376	return 0;
   4377}
   4378
   4379static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
   4380{
    4381	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL;
   4382}
   4383
   4384static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
   4385{
   4386	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4387
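	/*
	 * Refresh the SMC VBI timeout with the latest 2x frame time
	 * before reporting that a display is present.
	 */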
   4388	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
   4389		if (hwmgr->chip_id == CHIP_VEGAM)
   4390			smum_send_msg_to_smc_with_parameter(hwmgr,
   4391					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
   4392					NULL);
   4393		else
   4394			smum_send_msg_to_smc_with_parameter(hwmgr,
   4395					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
   4396					NULL);
   4397		data->last_sent_vbi_timeout = data->frame_time_x2;
   4398	}
   4399
    4400	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
   4401}
   4402
   4403static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
   4404{
   4405	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4406	int result = 0;
   4407
   4408	if (data->mclk_ignore_signal)
   4409		result = smu7_notify_no_display(hwmgr);
   4410	else
   4411		result = smu7_notify_has_display(hwmgr);
   4412
   4413	return result;
   4414}
   4415
   4416static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
   4417{
   4418	int tmp_result, result = 0;
   4419	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4420
   4421	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
   4422	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4423			"Failed to find DPM states clocks in DPM table!",
   4424			result = tmp_result);
   4425
   4426	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   4427			PHM_PlatformCaps_PCIEPerformanceRequest)) {
   4428		tmp_result =
   4429			smu7_request_link_speed_change_before_state_change(hwmgr, input);
   4430		PP_ASSERT_WITH_CODE((0 == tmp_result),
   4431				"Failed to request link speed change before state change!",
   4432				result = tmp_result);
   4433	}
   4434
   4435	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
   4436	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4437			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
   4438
   4439	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
   4440	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4441			"Failed to populate and upload SCLK MCLK DPM levels!",
   4442			result = tmp_result);
   4443
   4444	/*
   4445	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
    4446	 * That effectively disables the AVFS feature.
   4447	 */
   4448	if (hwmgr->hardcode_pp_table != NULL)
   4449		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
   4450
   4451	tmp_result = smu7_update_avfs(hwmgr);
   4452	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4453			"Failed to update avfs voltages!",
   4454			result = tmp_result);
   4455
   4456	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
   4457	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4458			"Failed to generate DPM level enabled mask!",
   4459			result = tmp_result);
   4460
   4461	tmp_result = smum_update_sclk_threshold(hwmgr);
   4462	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4463			"Failed to update SCLK threshold!",
   4464			result = tmp_result);
   4465
   4466	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
   4467	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4468			"Failed to unfreeze SCLK MCLK DPM!",
   4469			result = tmp_result);
   4470
   4471	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
   4472	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4473			"Failed to upload DPM level enabled mask!",
   4474			result = tmp_result);
   4475
   4476	tmp_result = smu7_notify_smc_display(hwmgr);
   4477	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4478			"Failed to notify smc display settings!",
   4479			result = tmp_result);
   4480
   4481	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   4482			PHM_PlatformCaps_PCIEPerformanceRequest)) {
   4483		tmp_result =
   4484			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
   4485		PP_ASSERT_WITH_CODE((0 == tmp_result),
   4486				"Failed to notify link speed change after state change!",
   4487				result = tmp_result);
   4488	}
   4489	data->apply_optimized_settings = false;
   4490	return result;
   4491}
   4492
   4493static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
   4494{
   4495	hwmgr->thermal_controller.
   4496	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
   4497
   4498	return smum_send_msg_to_smc_with_parameter(hwmgr,
   4499			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
   4500			NULL);
   4501}
   4502
   4503static int
   4504smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
   4505{
   4506	return 0;
   4507}
   4508
   4509/**
   4510 * smu7_program_display_gap - Programs the display gap
   4511 *
   4512 * @hwmgr:  the address of the powerplay hardware manager.
   4513 * Return:   always OK
   4514 */
   4515static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
   4516{
   4517	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4518	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
   4519	uint32_t display_gap2;
   4520	uint32_t pre_vbi_time_in_us;
   4521	uint32_t frame_time_in_us;
   4522	uint32_t ref_clock, refresh_rate;
   4523
   4524	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
   4525	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
   4526
   4527	ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
   4528	refresh_rate = hwmgr->display_config->vrefresh;
   4529
   4530	if (0 == refresh_rate)
   4531		refresh_rate = 60;
   4532
   4533	frame_time_in_us = 1000000 / refresh_rate;
   4534
   4535	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
   4536
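	/*
	 * frame_time_x2 holds twice the frame time in units of 100 us,
	 * e.g. 333 for a 60 Hz display (16666 us per frame).
	 */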
   4537	data->frame_time_x2 = frame_time_in_us * 2 / 100;
   4538
   4539	if (data->frame_time_x2 < 280) {
   4540		pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
   4541		data->frame_time_x2 = 280;
   4542	}
   4543
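	/*
	 * Express the pre-VBI time in reference clock cycles; xclk is
	 * assumed to be reported in 10 kHz units here, so ref_clock / 100
	 * gives MHz and the product a cycle count.
	 */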
   4544	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
   4545
   4546	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
   4547
   4548	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   4549			data->soft_regs_start + smum_get_offsetof(hwmgr,
   4550							SMU_SoftRegisters,
   4551							PreVBlankGap), 0x64);
   4552
   4553	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   4554			data->soft_regs_start + smum_get_offsetof(hwmgr,
   4555							SMU_SoftRegisters,
   4556							VBlankTimeout),
   4557					(frame_time_in_us - pre_vbi_time_in_us));
   4558
   4559	return 0;
   4560}
   4561
   4562static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
   4563{
   4564	return smu7_program_display_gap(hwmgr);
   4565}
   4566
   4567/**
   4568 * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
   4569 *
   4570 * @hwmgr:  the address of the powerplay hardware manager.
   4571 * @us_max_fan_rpm:  max operating fan RPM value.
   4572 * Return:   The response that came from the SMC.
   4573 */
   4574static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
   4575{
   4576	hwmgr->thermal_controller.
   4577	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
   4578
   4579	return smum_send_msg_to_smc_with_parameter(hwmgr,
   4580			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
   4581			NULL);
   4582}
   4583
   4584static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
   4585	.process = phm_irq_process,
   4586};
   4587
   4588static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
   4589{
   4590	struct amdgpu_irq_src *source =
   4591		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
   4592
   4593	if (!source)
   4594		return -ENOMEM;
   4595
   4596	source->funcs = &smu7_irq_funcs;
   4597
   4598	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
   4599			AMDGPU_IRQ_CLIENTID_LEGACY,
   4600			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
   4601			source);
   4602	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
   4603			AMDGPU_IRQ_CLIENTID_LEGACY,
   4604			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
   4605			source);
   4606
    4607	/* Register CTF (GPIO_19) interrupt */
   4608	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
   4609			AMDGPU_IRQ_CLIENTID_LEGACY,
   4610			VISLANDS30_IV_SRCID_GPIO_19,
   4611			source);
   4612
   4613	return 0;
   4614}
   4615
   4616static bool
   4617smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
   4618{
   4619	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4620	bool is_update_required = false;
   4621
   4622	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
   4623		is_update_required = true;
   4624
   4625	if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
   4626		is_update_required = true;
   4627
   4628	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
   4629	    hwmgr->chip_id <= CHIP_VEGAM &&
   4630	    data->last_sent_vbi_timeout != data->frame_time_x2)
   4631		is_update_required = true;
   4632
   4633	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
   4634		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
   4635			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
   4636			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
   4637			is_update_required = true;
   4638	}
   4639	return is_update_required;
   4640}
   4641
   4642static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
   4643							   const struct smu7_performance_level *pl2)
   4644{
   4645	return ((pl1->memory_clock == pl2->memory_clock) &&
   4646		  (pl1->engine_clock == pl2->engine_clock) &&
   4647		  (pl1->pcie_gen == pl2->pcie_gen) &&
   4648		  (pl1->pcie_lane == pl2->pcie_lane));
   4649}
   4650
   4651static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
   4652		const struct pp_hw_power_state *pstate1,
   4653		const struct pp_hw_power_state *pstate2, bool *equal)
   4654{
   4655	const struct smu7_power_state *psa;
   4656	const struct smu7_power_state *psb;
   4657	int i;
   4658	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4659
   4660	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
   4661		return -EINVAL;
   4662
   4663	psa = cast_const_phw_smu7_power_state(pstate1);
   4664	psb = cast_const_phw_smu7_power_state(pstate2);
    4665	/* If the two states don't even have the same number of performance levels, they cannot be the same state. */
   4666	if (psa->performance_level_count != psb->performance_level_count) {
   4667		*equal = false;
   4668		return 0;
   4669	}
   4670
   4671	for (i = 0; i < psa->performance_level_count; i++) {
   4672		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
    4673			/* If we have found even one performance level pair that is different, the states are different. */
   4674			*equal = false;
   4675			return 0;
   4676		}
   4677	}
   4678
    4679	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
   4680	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
   4681	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
   4682	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
   4683	/* For OD call, set value based on flag */
   4684	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
   4685							DPMTABLE_OD_UPDATE_MCLK |
   4686							DPMTABLE_OD_UPDATE_VDDC));
   4687
   4688	return 0;
   4689}
   4690
   4691static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
   4692{
   4693	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4694
   4695	uint32_t tmp;
   4696
   4697	/* Read MC indirect register offset 0x9F bits [3:0] to see
   4698	 * if VBIOS has already loaded a full version of MC ucode
   4699	 * or not.
   4700	 */
   4701
   4702	smu7_get_mc_microcode_version(hwmgr);
   4703
   4704	data->need_long_memory_training = false;
   4705
   4706	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
   4707							ixMC_IO_DEBUG_UP_13);
   4708	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
   4709
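	/*
	 * Bit 23 of MC_IO_DEBUG_UP_13 selects the tuned low/high memory
	 * latency pair (and enables FFC on Polaris); otherwise fall back
	 * to the conservative value of 330 with FFC disabled.
	 */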
   4710	if (tmp & (1 << 23)) {
   4711		data->mem_latency_high = MEM_LATENCY_HIGH;
   4712		data->mem_latency_low = MEM_LATENCY_LOW;
   4713		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
   4714		    (hwmgr->chip_id == CHIP_POLARIS11) ||
   4715		    (hwmgr->chip_id == CHIP_POLARIS12))
   4716			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
   4717	} else {
   4718		data->mem_latency_high = 330;
   4719		data->mem_latency_low = 330;
   4720		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
   4721		    (hwmgr->chip_id == CHIP_POLARIS11) ||
   4722		    (hwmgr->chip_id == CHIP_POLARIS12))
   4723			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
   4724	}
   4725
   4726	return 0;
   4727}
   4728
   4729static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
   4730{
   4731	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4732
   4733	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
   4734		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
   4735	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
   4736		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
   4737	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
   4738		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
   4739	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
   4740		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
   4741	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
   4742		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
   4743	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
   4744		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
   4745	data->clock_registers.vDLL_CNTL                  =
   4746		cgs_read_register(hwmgr->device, mmDLL_CNTL);
   4747	data->clock_registers.vMCLK_PWRMGT_CNTL          =
   4748		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
   4749	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
   4750		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
   4751	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
   4752		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
   4753	data->clock_registers.vMPLL_FUNC_CNTL            =
   4754		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
   4755	data->clock_registers.vMPLL_FUNC_CNTL_1          =
   4756		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
   4757	data->clock_registers.vMPLL_FUNC_CNTL_2          =
   4758		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
   4759	data->clock_registers.vMPLL_SS1                  =
   4760		cgs_read_register(hwmgr->device, mmMPLL_SS1);
   4761	data->clock_registers.vMPLL_SS2                  =
   4762		cgs_read_register(hwmgr->device, mmMPLL_SS2);
   4763	return 0;
   4764
   4765}
   4766
   4767/**
   4768 * smu7_get_memory_type - Find out if memory is GDDR5.
   4769 *
   4770 * @hwmgr:  the address of the powerplay hardware manager.
   4771 * Return:   always 0
   4772 */
   4773static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
   4774{
   4775	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4776	struct amdgpu_device *adev = hwmgr->adev;
   4777
   4778	data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
   4779
   4780	return 0;
   4781}
   4782
   4783/**
   4784 * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC
   4785 *
   4786 * @hwmgr:  the address of the powerplay hardware manager.
   4787 * Return:   always 0
   4788 */
   4789static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
   4790{
   4791	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   4792			GENERAL_PWRMGT, STATIC_PM_EN, 1);
   4793
   4794	return 0;
   4795}
   4796
   4797/**
   4798 * smu7_init_power_gate_state - Initialize PowerGating States for different engines
   4799 *
   4800 * @hwmgr:  the address of the powerplay hardware manager.
   4801 * Return:   always 0
   4802 */
   4803static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
   4804{
   4805	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4806
   4807	data->uvd_power_gated = false;
   4808	data->vce_power_gated = false;
   4809
   4810	return 0;
   4811}
   4812
   4813static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
   4814{
   4815	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4816
   4817	data->low_sclk_interrupt_threshold = 0;
   4818	return 0;
   4819}
   4820
   4821static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
   4822{
   4823	int tmp_result, result = 0;
   4824
   4825	smu7_check_mc_firmware(hwmgr);
   4826
   4827	tmp_result = smu7_read_clock_registers(hwmgr);
   4828	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4829			"Failed to read clock registers!", result = tmp_result);
   4830
   4831	tmp_result = smu7_get_memory_type(hwmgr);
   4832	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4833			"Failed to get memory type!", result = tmp_result);
   4834
   4835	tmp_result = smu7_enable_acpi_power_management(hwmgr);
   4836	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4837			"Failed to enable ACPI power management!", result = tmp_result);
   4838
   4839	tmp_result = smu7_init_power_gate_state(hwmgr);
   4840	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4841			"Failed to init power gate state!", result = tmp_result);
   4842
   4843	tmp_result = smu7_get_mc_microcode_version(hwmgr);
   4844	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4845			"Failed to get MC microcode version!", result = tmp_result);
   4846
   4847	tmp_result = smu7_init_sclk_threshold(hwmgr);
   4848	PP_ASSERT_WITH_CODE((0 == tmp_result),
   4849			"Failed to init sclk threshold!", result = tmp_result);
   4850
   4851	return result;
   4852}
   4853
   4854static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
   4855		enum pp_clock_type type, uint32_t mask)
   4856{
   4857	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4858
   4859	if (mask == 0)
   4860		return -EINVAL;
   4861
   4862	switch (type) {
   4863	case PP_SCLK:
   4864		if (!data->sclk_dpm_key_disabled)
   4865			smum_send_msg_to_smc_with_parameter(hwmgr,
   4866					PPSMC_MSG_SCLKDPM_SetEnabledMask,
   4867					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
   4868					NULL);
   4869		break;
   4870	case PP_MCLK:
   4871		if (!data->mclk_dpm_key_disabled)
   4872			smum_send_msg_to_smc_with_parameter(hwmgr,
   4873					PPSMC_MSG_MCLKDPM_SetEnabledMask,
   4874					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
   4875					NULL);
   4876		break;
   4877	case PP_PCIE:
   4878	{
   4879		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
   4880
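		/*
		 * fls(tmp) == ffs(tmp) iff exactly one level bit is set:
		 * force that single PCIe level, otherwise unforce and let
		 * DPM choose among the masked levels.
		 */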
   4881		if (!data->pcie_dpm_key_disabled) {
   4882			if (fls(tmp) != ffs(tmp))
   4883				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
   4884						NULL);
   4885			else
   4886				smum_send_msg_to_smc_with_parameter(hwmgr,
   4887					PPSMC_MSG_PCIeDPM_ForceLevel,
   4888					fls(tmp) - 1,
   4889					NULL);
   4890		}
   4891		break;
   4892	}
   4893	default:
   4894		break;
   4895	}
   4896
   4897	return 0;
   4898}
   4899
   4900static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
   4901		enum pp_clock_type type, char *buf)
   4902{
   4903	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   4904	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
   4905	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
   4906	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
   4907	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
   4908	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
   4909	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
   4910	int size = 0;
   4911	uint32_t i, now, clock, pcie_speed;
   4912
   4913	switch (type) {
   4914	case PP_SCLK:
   4915		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
   4916
   4917		for (i = 0; i < sclk_table->count; i++) {
   4918			if (clock > sclk_table->dpm_levels[i].value)
   4919				continue;
   4920			break;
   4921		}
   4922		now = i;
   4923
   4924		for (i = 0; i < sclk_table->count; i++)
    4925			size += sprintf(buf + size, "%d: %uMHz %s\n",
   4926					i, sclk_table->dpm_levels[i].value / 100,
   4927					(i == now) ? "*" : "");
   4928		break;
   4929	case PP_MCLK:
   4930		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
   4931
   4932		for (i = 0; i < mclk_table->count; i++) {
   4933			if (clock > mclk_table->dpm_levels[i].value)
   4934				continue;
   4935			break;
   4936		}
   4937		now = i;
   4938
   4939		for (i = 0; i < mclk_table->count; i++)
    4940			size += sprintf(buf + size, "%d: %uMHz %s\n",
   4941					i, mclk_table->dpm_levels[i].value / 100,
   4942					(i == now) ? "*" : "");
   4943		break;
   4944	case PP_PCIE:
   4945		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
   4946		for (i = 0; i < pcie_table->count; i++) {
   4947			if (pcie_speed != pcie_table->dpm_levels[i].value)
   4948				continue;
   4949			break;
   4950		}
   4951		now = i;
   4952
   4953		for (i = 0; i < pcie_table->count; i++)
   4954			size += sprintf(buf + size, "%d: %s %s\n", i,
   4955					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
   4956					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
   4957					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
   4958					(i == now) ? "*" : "");
   4959		break;
   4960	case OD_SCLK:
   4961		if (hwmgr->od_enabled) {
   4962			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
   4963			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
   4964				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
   4965					i, odn_sclk_table->entries[i].clock/100,
   4966					odn_sclk_table->entries[i].vddc);
   4967		}
   4968		break;
   4969	case OD_MCLK:
   4970		if (hwmgr->od_enabled) {
   4971			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
   4972			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
   4973				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
   4974					i, odn_mclk_table->entries[i].clock/100,
   4975					odn_mclk_table->entries[i].vddc);
   4976		}
   4977		break;
   4978	case OD_RANGE:
   4979		if (hwmgr->od_enabled) {
   4980			size += sprintf(buf + size, "%s:\n", "OD_RANGE");
   4981			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
   4982				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
   4983				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
   4984			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
   4985				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
   4986				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
   4987			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
   4988				data->odn_dpm_table.min_vddc,
   4989				data->odn_dpm_table.max_vddc);
   4990		}
   4991		break;
   4992	default:
   4993		break;
   4994	}
   4995	return size;
   4996}
   4997
   4998static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
   4999{
   5000	switch (mode) {
   5001	case AMD_FAN_CTRL_NONE:
   5002		smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
   5003		break;
   5004	case AMD_FAN_CTRL_MANUAL:
   5005		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   5006			PHM_PlatformCaps_MicrocodeFanControl))
   5007			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
   5008		break;
   5009	case AMD_FAN_CTRL_AUTO:
   5010		if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
   5011			smu7_fan_ctrl_start_smc_fan_control(hwmgr);
   5012		break;
   5013	default:
   5014		break;
   5015	}
   5016}
   5017
   5018static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
   5019{
   5020	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
   5021}
   5022
   5023static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
   5024{
   5025	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5026	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
   5027	struct smu7_single_dpm_table *golden_sclk_table =
   5028			&(data->golden_dpm_table.sclk_table);
   5029	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
   5030	int golden_value = golden_sclk_table->dpm_levels
   5031			[golden_sclk_table->count - 1].value;
   5032
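	/*
	 * Overdrive is reported as the percentage increase of the top
	 * DPM level over the golden top level, rounded up.
	 */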
   5033	value -= golden_value;
   5034	value = DIV_ROUND_UP(value * 100, golden_value);
   5035
   5036	return value;
   5037}
   5038
   5039static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
   5040{
   5041	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5042	struct smu7_single_dpm_table *golden_sclk_table =
   5043			&(data->golden_dpm_table.sclk_table);
   5044	struct pp_power_state  *ps;
   5045	struct smu7_power_state  *smu7_ps;
   5046
   5047	if (value > 20)
   5048		value = 20;
   5049
   5050	ps = hwmgr->request_ps;
   5051
   5052	if (ps == NULL)
   5053		return -EINVAL;
   5054
   5055	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
   5056
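	/* Scale the golden top engine clock by (100 + value) percent. */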
   5057	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
   5058			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
   5059			value / 100 +
   5060			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
   5061
   5062	return 0;
   5063}
   5064
   5065static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
   5066{
   5067	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5068	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
   5069	struct smu7_single_dpm_table *golden_mclk_table =
   5070			&(data->golden_dpm_table.mclk_table);
    5071	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
   5072	int golden_value = golden_mclk_table->dpm_levels
   5073			[golden_mclk_table->count - 1].value;
   5074
   5075	value -= golden_value;
   5076	value = DIV_ROUND_UP(value * 100, golden_value);
   5077
   5078	return value;
   5079}
   5080
   5081static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
   5082{
   5083	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5084	struct smu7_single_dpm_table *golden_mclk_table =
   5085			&(data->golden_dpm_table.mclk_table);
   5086	struct pp_power_state  *ps;
   5087	struct smu7_power_state  *smu7_ps;
   5088
   5089	if (value > 20)
   5090		value = 20;
   5091
   5092	ps = hwmgr->request_ps;
   5093
   5094	if (ps == NULL)
   5095		return -EINVAL;
   5096
   5097	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
   5098
   5099	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
   5100			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
   5101			value / 100 +
   5102			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
   5103
   5104	return 0;
   5105}
   5106
   5107
   5108static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
   5109{
   5110	struct phm_ppt_v1_information *table_info =
   5111			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5112	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
   5113	struct phm_clock_voltage_dependency_table *sclk_table;
   5114	int i;
   5115
   5116	if (hwmgr->pp_table_version == PP_TABLE_V1) {
   5117		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
   5118			return -EINVAL;
   5119		dep_sclk_table = table_info->vdd_dep_on_sclk;
   5120		for (i = 0; i < dep_sclk_table->count; i++)
   5121			clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
   5122		clocks->count = dep_sclk_table->count;
   5123	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
   5124		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
   5125		for (i = 0; i < sclk_table->count; i++)
   5126			clocks->clock[i] = sclk_table->entries[i].clk * 10;
   5127		clocks->count = sclk_table->count;
   5128	}
   5129
   5130	return 0;
   5131}
   5132
   5133static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
   5134{
   5135	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5136
   5137	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
   5138		return data->mem_latency_high;
   5139	else if (clk >= MEM_FREQ_HIGH_LATENCY)
   5140		return data->mem_latency_low;
   5141	else
   5142		return MEM_LATENCY_ERR;
   5143}
   5144
   5145static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
   5146{
   5147	struct phm_ppt_v1_information *table_info =
   5148			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5149	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
   5150	int i;
   5151	struct phm_clock_voltage_dependency_table *mclk_table;
   5152
   5153	if (hwmgr->pp_table_version == PP_TABLE_V1) {
   5154		if (table_info == NULL)
   5155			return -EINVAL;
   5156		dep_mclk_table = table_info->vdd_dep_on_mclk;
   5157		for (i = 0; i < dep_mclk_table->count; i++) {
   5158			clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
   5159			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
   5160						dep_mclk_table->entries[i].clk);
   5161		}
   5162		clocks->count = dep_mclk_table->count;
   5163	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
   5164		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
   5165		for (i = 0; i < mclk_table->count; i++)
   5166			clocks->clock[i] = mclk_table->entries[i].clk * 10;
   5167		clocks->count = mclk_table->count;
   5168	}
   5169	return 0;
   5170}
   5171
   5172static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
   5173						struct amd_pp_clocks *clocks)
   5174{
   5175	switch (type) {
   5176	case amd_pp_sys_clock:
   5177		smu7_get_sclks(hwmgr, clocks);
   5178		break;
   5179	case amd_pp_mem_clock:
   5180		smu7_get_mclks(hwmgr, clocks);
   5181		break;
   5182	default:
   5183		return -EINVAL;
   5184	}
   5185
   5186	return 0;
   5187}
   5188
   5189static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
   5190				       struct pp_clock_levels_with_latency *clocks)
   5191{
   5192	struct phm_ppt_v1_information *table_info =
   5193			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5194	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
   5195			table_info->vdd_dep_on_sclk;
   5196	int i;
   5197
   5198	clocks->num_levels = 0;
   5199	for (i = 0; i < dep_sclk_table->count; i++) {
   5200		if (dep_sclk_table->entries[i].clk) {
   5201			clocks->data[clocks->num_levels].clocks_in_khz =
   5202				dep_sclk_table->entries[i].clk * 10;
   5203			clocks->num_levels++;
   5204		}
   5205	}
   5206
   5207	return 0;
   5208}
   5209
   5210static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
   5211				       struct pp_clock_levels_with_latency *clocks)
   5212{
   5213	struct phm_ppt_v1_information *table_info =
   5214			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5215	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
   5216			table_info->vdd_dep_on_mclk;
   5217	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5218	int i;
   5219
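	/*
	 * Build the caller's clock/latency list and the cached
	 * mclk_latency_table in a single pass over the dependency table.
	 */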
   5220	clocks->num_levels = 0;
   5221	data->mclk_latency_table.count = 0;
   5222	for (i = 0; i < dep_mclk_table->count; i++) {
   5223		if (dep_mclk_table->entries[i].clk) {
   5224			clocks->data[clocks->num_levels].clocks_in_khz =
   5225					dep_mclk_table->entries[i].clk * 10;
   5226			data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
   5227					dep_mclk_table->entries[i].clk;
   5228			clocks->data[clocks->num_levels].latency_in_us =
   5229				data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
   5230					smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
   5231			clocks->num_levels++;
   5232			data->mclk_latency_table.count++;
   5233		}
   5234	}
   5235
   5236	return 0;
   5237}
   5238
   5239static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
   5240					       enum amd_pp_clock_type type,
   5241					       struct pp_clock_levels_with_latency *clocks)
   5242{
   5243	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
   5244	      hwmgr->chip_id <= CHIP_VEGAM))
   5245		return -EINVAL;
   5246
   5247	switch (type) {
   5248	case amd_pp_sys_clock:
   5249		smu7_get_sclks_with_latency(hwmgr, clocks);
   5250		break;
   5251	case amd_pp_mem_clock:
   5252		smu7_get_mclks_with_latency(hwmgr, clocks);
   5253		break;
   5254	default:
   5255		return -EINVAL;
   5256	}
   5257
   5258	return 0;
   5259}
   5260
   5261static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
   5262						 void *clock_range)
   5263{
   5264	struct phm_ppt_v1_information *table_info =
   5265			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5266	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
   5267			table_info->vdd_dep_on_mclk;
   5268	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
   5269			table_info->vdd_dep_on_sclk;
   5270	struct polaris10_smumgr *smu_data =
   5271			(struct polaris10_smumgr *)(hwmgr->smu_backend);
   5272	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
   5273	struct dm_pp_wm_sets_with_clock_ranges *watermarks =
   5274			(struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
   5275	uint32_t i, j, k;
   5276	bool valid_entry;
   5277
   5278	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
   5279	      hwmgr->chip_id <= CHIP_VEGAM))
   5280		return -EINVAL;
   5281
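	/*
	 * For each (mclk, sclk) level pair, pick the DAL watermark set
	 * whose engine and memory clock ranges contain the pair, falling
	 * back to the last set when nothing matches.
	 */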
   5282	for (i = 0; i < dep_mclk_table->count; i++) {
   5283		for (j = 0; j < dep_sclk_table->count; j++) {
   5284			valid_entry = false;
   5285			for (k = 0; k < watermarks->num_wm_sets; k++) {
    5286				if (dep_sclk_table->entries[j].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
    5287				    dep_sclk_table->entries[j].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
   5288				    dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
   5289				    dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
   5290					valid_entry = true;
   5291					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
   5292					break;
   5293				}
   5294			}
   5295			PP_ASSERT_WITH_CODE(valid_entry,
    5296					"Clock is not in range of specified clock range for watermark from DAL! Using the highest watermark set.",
   5297					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
   5298		}
   5299	}
   5300
   5301	return smu7_copy_bytes_to_smc(hwmgr,
   5302				      smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
   5303				      (uint8_t *)table->DisplayWatermark,
   5304				      sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
   5305				      SMC_RAM_END);
   5306}
   5307
   5308static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
   5309					uint32_t virtual_addr_low,
   5310					uint32_t virtual_addr_hi,
   5311					uint32_t mc_addr_low,
   5312					uint32_t mc_addr_hi,
   5313					uint32_t size)
   5314{
   5315	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5316
   5317	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   5318					data->soft_regs_start +
   5319					smum_get_offsetof(hwmgr,
   5320					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
   5321					mc_addr_hi);
   5322
   5323	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   5324					data->soft_regs_start +
   5325					smum_get_offsetof(hwmgr,
   5326					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
   5327					mc_addr_low);
   5328
   5329	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   5330					data->soft_regs_start +
   5331					smum_get_offsetof(hwmgr,
   5332					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
   5333					virtual_addr_hi);
   5334
   5335	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   5336					data->soft_regs_start +
   5337					smum_get_offsetof(hwmgr,
   5338					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
   5339					virtual_addr_low);
   5340
   5341	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   5342					data->soft_regs_start +
   5343					smum_get_offsetof(hwmgr,
   5344					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
   5345					size);
   5346	return 0;
   5347}
   5348
   5349static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
   5350					struct amd_pp_simple_clock_info *clocks)
   5351{
   5352	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5353	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
   5354	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
   5355
   5356	if (clocks == NULL)
   5357		return -EINVAL;
   5358
   5359	clocks->memory_max_clock = mclk_table->count > 1 ?
   5360				mclk_table->dpm_levels[mclk_table->count-1].value :
   5361				mclk_table->dpm_levels[0].value;
   5362	clocks->engine_max_clock = sclk_table->count > 1 ?
   5363				sclk_table->dpm_levels[sclk_table->count-1].value :
   5364				sclk_table->dpm_levels[0].value;
   5365	return 0;
   5366}
   5367
   5368static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
   5369		struct PP_TemperatureRange *thermal_data)
   5370{
   5371	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5372	struct phm_ppt_v1_information *table_info =
   5373			(struct phm_ppt_v1_information *)hwmgr->pptable;
   5374
   5375	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
   5376
   5377	if (hwmgr->pp_table_version == PP_TABLE_V1)
   5378		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
   5379			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
   5380	else if (hwmgr->pp_table_version == PP_TABLE_V0)
   5381		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
   5382			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
   5383
   5384	return 0;
   5385}
   5386
   5387static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
   5388					enum PP_OD_DPM_TABLE_COMMAND type,
   5389					uint32_t clk,
   5390					uint32_t voltage)
   5391{
   5392	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5393
   5394	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
   5395		pr_info("OD voltage is out of range [%d - %d] mV\n",
   5396						data->odn_dpm_table.min_vddc,
   5397						data->odn_dpm_table.max_vddc);
   5398		return false;
   5399	}
   5400
   5401	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
   5402		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
   5403			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
   5404			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
   5405				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
   5406				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
   5407			return false;
   5408		}
   5409	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
   5410		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
   5411			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
   5412			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
   5413				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
   5414				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
   5415			return false;
   5416		}
   5417	} else {
   5418		return false;
   5419	}
   5420
   5421	return true;
   5422}
   5423
   5424static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
   5425					enum PP_OD_DPM_TABLE_COMMAND type,
   5426					long *input, uint32_t size)
   5427{
   5428	uint32_t i;
   5429	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
   5430	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
   5431	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5432
   5433	uint32_t input_clk;
   5434	uint32_t input_vol;
   5435	uint32_t input_level;
   5436
   5437	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
   5438				return -EINVAL);
   5439
   5440	if (!hwmgr->od_enabled) {
   5441		pr_info("OverDrive feature not enabled\n");
   5442		return -EINVAL;
   5443	}
   5444
   5445	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
   5446		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
   5447		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
   5448		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
   5449				"Failed to get ODN SCLK and Voltage tables",
   5450				return -EINVAL);
   5451	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
   5452		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
   5453		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
   5454
   5455		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
   5456			"Failed to get ODN MCLK and Voltage tables",
   5457			return -EINVAL);
   5458	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
   5459		smu7_odn_initial_default_setting(hwmgr);
   5460		return 0;
   5461	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
   5462		smu7_check_dpm_table_updated(hwmgr);
   5463		return 0;
   5464	} else {
   5465		return -EINVAL;
   5466	}
   5467
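	/*
	 * User input is consumed as <level, clock (MHz), voltage (mV)>
	 * triplets; clocks are stored internally in 10 kHz units.
	 */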
   5468	for (i = 0; i < size; i += 3) {
   5469		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
    5470			pr_info("invalid clock voltage input\n");
   5471			return 0;
   5472		}
   5473		input_level = input[i];
   5474		input_clk = input[i+1] * 100;
   5475		input_vol = input[i+2];
   5476
   5477		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
   5478			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
   5479			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
   5480			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
   5481			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
   5482			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
   5483		} else {
   5484			return -EINVAL;
   5485		}
   5486	}
   5487
   5488	return 0;
   5489}
   5490
   5491static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
   5492{
   5493	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5494	uint32_t i, size = 0;
   5495	uint32_t len;
   5496
   5497	static const char *title[8] = {"NUM",
   5498			"MODE_NAME",
   5499			"SCLK_UP_HYST",
   5500			"SCLK_DOWN_HYST",
   5501			"SCLK_ACTIVE_LEVEL",
   5502			"MCLK_UP_HYST",
   5503			"MCLK_DOWN_HYST",
   5504			"MCLK_ACTIVE_LEVEL"};
   5505
   5506	if (!buf)
   5507		return -EINVAL;
   5508
   5509	phm_get_sysfs_buf(&buf, &size);
   5510
   5511	size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
   5512			title[0], title[1], title[2], title[3],
   5513			title[4], title[5], title[6], title[7]);
   5514
   5515	len = ARRAY_SIZE(smu7_profiling);
   5516
   5517	for (i = 0; i < len; i++) {
   5518		if (i == hwmgr->power_profile_mode) {
   5519			size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
   5520			i, amdgpu_pp_profile_name[i], "*",
   5521			data->current_profile_setting.sclk_up_hyst,
   5522			data->current_profile_setting.sclk_down_hyst,
   5523			data->current_profile_setting.sclk_activity,
   5524			data->current_profile_setting.mclk_up_hyst,
   5525			data->current_profile_setting.mclk_down_hyst,
   5526			data->current_profile_setting.mclk_activity);
   5527			continue;
   5528		}
   5529		if (smu7_profiling[i].bupdate_sclk)
   5530			size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
   5531			i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst,
   5532			smu7_profiling[i].sclk_down_hyst,
   5533			smu7_profiling[i].sclk_activity);
   5534		else
   5535			size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
   5536			i, amdgpu_pp_profile_name[i], "-", "-", "-");
   5537
   5538		if (smu7_profiling[i].bupdate_mclk)
   5539			size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
   5540			smu7_profiling[i].mclk_up_hyst,
   5541			smu7_profiling[i].mclk_down_hyst,
   5542			smu7_profiling[i].mclk_activity);
   5543		else
   5544			size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
   5545			"-", "-", "-");
   5546	}
   5547
   5548	return size;
   5549}
   5550
   5551static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
    5552					enum PP_SMC_POWER_PROFILE request)
   5553{
   5554	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5555	uint32_t tmp, level;
   5556
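	/*
	 * Entering COMPUTE: restrict SCLK DPM to the two highest enabled
	 * levels (3 << (level - 1) keeps the top bit and its neighbor).
	 * Leaving COMPUTE: restore the full SCLK enable mask.
	 */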
    5557	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
   5558		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
   5559			level = 0;
   5560			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
   5561			while (tmp >>= 1)
   5562				level++;
   5563			if (level > 0)
   5564				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
   5565		}
   5566	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
   5567		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
   5568	}
   5569}
   5570
   5571static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
   5572{
   5573	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
   5574	struct profile_mode_setting tmp;
   5575	enum PP_SMC_POWER_PROFILE mode;
   5576
   5577	if (input == NULL)
   5578		return -EINVAL;
   5579
   5580	mode = input[size];
   5581	switch (mode) {
   5582	case PP_SMC_POWER_PROFILE_CUSTOM:
   5583		if (size < 8 && size != 0)
   5584			return -EINVAL;
   5585		/* If only CUSTOM is passed in, use the saved values. Check
   5586		 * that we actually have a CUSTOM profile by ensuring that
   5587		 * the "use sclk" or the "use mclk" bits are set
   5588		 */
   5589		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
   5590		if (size == 0) {
   5591			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
   5592				return -EINVAL;
   5593		} else {
   5594			tmp.bupdate_sclk = input[0];
   5595			tmp.sclk_up_hyst = input[1];
   5596			tmp.sclk_down_hyst = input[2];
   5597			tmp.sclk_activity = input[3];
   5598			tmp.bupdate_mclk = input[4];
   5599			tmp.mclk_up_hyst = input[5];
   5600			tmp.mclk_down_hyst = input[6];
   5601			tmp.mclk_activity = input[7];
   5602			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
   5603		}
   5604		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
   5605			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
   5606			hwmgr->power_profile_mode = mode;
   5607		}
   5608		break;
   5609	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
   5610	case PP_SMC_POWER_PROFILE_POWERSAVING:
   5611	case PP_SMC_POWER_PROFILE_VIDEO:
   5612	case PP_SMC_POWER_PROFILE_VR:
   5613	case PP_SMC_POWER_PROFILE_COMPUTE:
   5614		if (mode == hwmgr->power_profile_mode)
   5615			return 0;
   5616
   5617		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
   5618		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
   5619			if (tmp.bupdate_sclk) {
   5620				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
   5621				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
   5622				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
   5623				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
   5624			}
   5625			if (tmp.bupdate_mclk) {
   5626				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
   5627				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
   5628				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
   5629				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
   5630			}
   5631			smu7_patch_compute_profile_mode(hwmgr, mode);
   5632			hwmgr->power_profile_mode = mode;
   5633		}
   5634		break;
   5635	default:
   5636		return -EINVAL;
   5637	}
   5638
   5639	return 0;
   5640}
   5641
   5642static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
   5643				PHM_PerformanceLevelDesignation designation, uint32_t index,
   5644				PHM_PerformanceLevel *level)
   5645{
   5646	const struct smu7_power_state *ps;
   5647	uint32_t i;
   5648
   5649	if (level == NULL || hwmgr == NULL || state == NULL)
   5650		return -EINVAL;
   5651
   5652	ps = cast_const_phw_smu7_power_state(state);
   5653
   5654	i = index > ps->performance_level_count - 1 ?
   5655			ps->performance_level_count - 1 : index;
   5656
   5657	level->coreClock = ps->performance_levels[i].engine_clock;
   5658	level->memory_clock = ps->performance_levels[i].memory_clock;
   5659
   5660	return 0;
   5661}
   5662
   5663static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
   5664{
   5665	int result;
   5666
   5667	result = smu7_disable_dpm_tasks(hwmgr);
   5668	PP_ASSERT_WITH_CODE((0 == result),
   5669			"[disable_dpm_tasks] Failed to disable DPM!",
   5670			);
   5671
   5672	return result;
   5673}
   5674
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = smu7_hwmgr_backend_init,
	.backend_fini = smu7_hwmgr_backend_fini,
	.asic_setup = smu7_setup_asic_task,
	.dynamic_state_management_enable = smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
	.set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
	.set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};

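/**
 * smu7_get_sleep_divider_id_from_clock - pick a deep-sleep clock divider
 * @clock: target engine clock
 * @clock_insr: minimum clock required while in deep sleep (stutter)
 *
 * Returns the largest divider ID such that (clock >> ID) still meets
 * max(clock_insr, SMU7_MINIMUM_ENGINE_CLOCK). For example, clock == 4 * min
 * yields ID 2 (given a max divider ID of at least 2), since
 * clock >> 2 == min. Returns 0 (no division) if the clock cannot satisfy
 * the requirement at all.
 */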
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

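/**
 * smu7_init_function_pointers - install the SMU7 hwmgr backend
 * @hwmgr: the hardware manager
 *
 * Hooks up smu7_hwmgr_funcs and selects the powerplay table parser
 * matching the table version (v0 or v1).
 */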
int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}