cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

legacy_dpm.c (40665B)


/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "amd_pcie.h"
#include "legacy_dpm.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

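/*
 * Thin wrappers around the powerplay pp_funcs callbacks.  The callbacks
 * are dereferenced unconditionally, so callers check the corresponding
 * pp_funcs pointer first where a callback is optional.
 */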
#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);

}

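/*
 * Every revision of the ATOM PowerPlay table begins with the same basic
 * header; overlaying the revisions in a union lets the parsers below
 * select the right view based on the reported table size.
 */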
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

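/*
 * ATOM dependency tables store each clock as a 16-bit little-endian low
 * word plus an 8-bit high byte, with the records packed back to back,
 * so they are walked with an explicit byte stride instead of array
 * indexing.
 */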
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

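/*
 * Parse the optional tables that later PowerPlay table revisions append:
 * the fan table (v3+), the clock/voltage dependency, clock limit and
 * phase shedding tables (v4+), the CAC data (v5+), and the
 * VCE/UVD/SAMU/PPM/ACP/PowerTune tables referenced from the extended
 * header.  Each table is read only if the containing table is large
 * enough and its offset is non-zero; error paths free previously
 * allocated tables before returning.
 */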
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
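			/*
			 * The VCE table appears to be packed: a one-byte
			 * revision, then a VCEClockInfoArray (one-byte count
			 * plus entries), the voltage limit table and the
			 * state table.  The "+ 1" terms below step over those
			 * single-byte revision/count fields.
			 */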
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

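/*
 * Free everything allocated by amdgpu_parse_extended_power_table().
 * kfree(NULL) is a no-op, so this is safe to call even after a partial
 * parse.
 */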
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

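/*
 * Parse the thermal controller entry of the PowerPlay table, record the
 * fan characteristics, and either select the matching internal thermal
 * type or register an i2c client for a recognized external controller.
 */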
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

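/*
 * Pick the best matching power state for the requested state type,
 * honoring single-display-only states and falling back to progressively
 * more generic state types when no direct match is found.
 */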
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

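/*
 * Switch to the requested power state: pick a state, run the pre-set
 * hook, skip the transition if the hardware reports the target state is
 * equal to the current one, then set the state, run the post-set hook
 * and reapply any forced performance level.
 */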
static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return 0;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return -EINVAL;

	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return ret;

	if (pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return 0;

	if (pp_funcs->set_power_state)
		pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

	return 0;
}

void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_get_active_displays(adev);

	amdgpu_dpm_change_power_state_locked(adev);
}

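/*
 * Worker for thermal interrupts: enter the internal thermal state while
 * the GPU temperature is above the configured minimum and drop back to
 * the user-selected state once it recovers (or on the high-to-low
 * transition when no temperature sensor reading is available).
 */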
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
				   &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back to the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back to the user state */
			dpm_state = adev->pm.dpm.user_state;
	}

	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;

	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
}