cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smu7_smumgr.c (17737B)


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */


#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
#include "smu7_common.h"

#include "polaris10_pwrvirus.h"

#define SMU7_SMC_SIZE 0x20000

static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
	return 0;
}
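/*
 * Editor's note (not part of the original driver): smu7_set_smc_sram_address()
 * sets up the usual index/data register pair for indirect SMC SRAM access.
 * A minimal sketch of how a dword at SRAM offset 0x1000 would be read through
 * that pair, assuming the register names used in this file:
 *
 *	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x1000);
 *	value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
 *
 * smu7_read_smc_sram_dword() below wraps exactly this sequence and adds the
 * alignment and limit checks.
 */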


int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
	uint32_t data;
	uint32_t addr;
	uint8_t *dest_byte;
	uint8_t i, data_byte[4] = {0};
	uint32_t *pdata = (uint32_t *)&data_byte;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);

		*dest = PP_SMC_TO_HOST_UL(data);

		dest += 1;
		byte_count -= 4;
		addr += 4;
	}

	if (byte_count) {
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
		*pdata = PP_SMC_TO_HOST_UL(data);
	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
		dest_byte = (uint8_t *)dest;
		for (i = 0; i < byte_count; i++)
			dest_byte[i] = data_byte[i];
	}

	return 0;
}
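/*
 * Editor's illustrative sketch (not part of the original driver): reading a
 * small block out of SMC SRAM with the helper above. The offset 0x10000 and
 * the buffer size are made up for the example; SMU7_SMC_SIZE is the limit
 * used elsewhere in this file.
 */
static int __maybe_unused smu7_example_read_block(struct pp_hwmgr *hwmgr)
{
	uint32_t buf[4] = {0};

	/* copy 16 bytes starting at SRAM offset 0x10000 into buf */
	return smu7_copy_bytes_from_smc(hwmgr, 0x10000, buf, sizeof(buf),
					SMU7_SMC_SIZE);
}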


int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
	/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;


		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}
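/*
 * Editor's worked example (not part of the original driver) for the trailing
 * partial word above: writing the three bytes { 0xAA, 0xBB, 0xCC } leaves
 * byte_count == 3 after the main loop, so
 *
 *	extra_shift = 8 * (4 - 3) = 8
 *	data        = 0xAABBCC          (MSB-first accumulation)
 *	data <<= 8  -> 0xAABBCC00
 *	data |= original_data & ~(~0UL << 8)
 *
 * i.e. the low byte already present in SMC SRAM is preserved and only the
 * three new bytes are replaced.
 */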


int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}

int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret;

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		dev_dbg(adev->dev, "last message was not supported\n");
	else if (ret != 1)
		dev_info(adev->dev,
			"\nlast message was failed ret is %d\n", ret);

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		dev_dbg(adev->dev, "message %x was not supported\n", msg);
	else if (ret != 1)
		dev_dbg(adev->dev,
			"failed to send message %x ret is %d \n",  msg, ret);

	return 0;
}

int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc(hwmgr, msg);
}

uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
{
	return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}
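/*
 * Editor's illustrative sketch (not part of the original driver): a typical
 * request/response round trip through the helpers above. PPSMC_MSG_Test is
 * the same message ID used by smu7_send_msg_to_smc_offset() below; the
 * parameter value is made up for the example.
 */
static uint32_t __maybe_unused smu7_example_msg_round_trip(struct pp_hwmgr *hwmgr)
{
	/* write the argument register, then post the message and wait for SMC_RESP */
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x1);

	/* the SMU leaves its reply (if any) in SMC_MSG_ARG_0 */
	return smu7_get_argument(hwmgr);
}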

int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
}

enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SMU:
		result = CGS_UCODE_ID_SMU;
		break;
	case UCODE_ID_SMU_SK:
		result = CGS_UCODE_ID_SMU_SK;
		break;
	case UCODE_ID_SDMA0:
		result = CGS_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = CGS_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = CGS_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = CGS_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = CGS_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC:
		result = CGS_UCODE_ID_CP_MEC;
		break;
	case UCODE_ID_CP_MEC_JT1:
		result = CGS_UCODE_ID_CP_MEC_JT1;
		break;
	case UCODE_ID_CP_MEC_JT2:
		result = CGS_UCODE_ID_CP_MEC_JT2;
		break;
	case UCODE_ID_RLC_G:
		result = CGS_UCODE_ID_RLC_G;
		break;
	case UCODE_ID_MEC_STORAGE:
		result = CGS_UCODE_ID_STORAGE;
		break;
	default:
		break;
	}

	return result;
}


int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	*value = result ? 0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

	return result;
}

int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);

	return 0;
}
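/*
 * Editor's illustrative sketch (not part of the original driver): a
 * read-modify-write of a single SMC SRAM dword using the two helpers above.
 * The offset and the bit being set are made up for the example.
 */
static int __maybe_unused smu7_example_rmw_dword(struct pp_hwmgr *hwmgr)
{
	uint32_t val;
	int ret;

	ret = smu7_read_smc_sram_dword(hwmgr, 0x10000, &val, SMU7_SMC_SIZE);
	if (ret)
		return ret;

	val |= 0x1;	/* hypothetical flag bit */

	return smu7_write_smc_sram_dword(hwmgr, 0x10000, val, SMU7_SMC_SIZE);
}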

static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	int result = 0;
	struct cgs_firmware_info info = {0};

	result = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(fw_type),
				&info);

	if (!result) {
		entry->version = info.fw_version;
		entry->id = (uint16_t)fw_type;
		entry->image_addr_high = upper_32_bits(info.mc_addr);
		entry->image_addr_low = lower_32_bits(info.mc_addr);
		entry->meta_data_addr_high = 0;
		entry->meta_data_addr_low = 0;

		/* the digest needs to be excluded */
		if (!hwmgr->not_vf)
			info.image_size -= 20;
		entry->data_size_byte = info.image_size;
		entry->num_register_entries = 0;
	}

	if ((fw_type == UCODE_ID_RLC_G)
		|| (fw_type == UCODE_ID_CP_MEC))
		entry->flags = 1;
	else
		entry->flags = 0;

	return 0;
}

int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_to_load;
	int r = 0;

	amdgpu_ucode_init_bo(hwmgr->adev);

	if (smu_data->soft_regs_start)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		if (hwmgr->not_vf) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						upper_32_bits(smu_data->smu_buffer.mc_addr),
						NULL);
			smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						lower_32_bits(smu_data->smu_buffer.mc_addr),
						NULL);
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	if (!smu_data->toc) {
		struct SMU_DRAMData_TOC *toc;

		smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
		if (!smu_data->toc)
			return -ENOMEM;
		toc = smu_data->toc;
		toc->num_entries = 0;
		toc->structure_version = 1;

		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		if (!hwmgr->not_vf)
			PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
	}
	memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
		    sizeof(struct SMU_DRAMData_TOC));
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DRV_DRAM_ADDR_HI,
			upper_32_bits(smu_data->header_buffer.mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DRV_DRAM_ADDR_LO,
			lower_32_bits(smu_data->header_buffer.mc_addr),
			NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);

	r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
	if (!r)
		return 0;

	pr_err("SMU load firmware failed\n");

failed:
	kfree(smu_data->toc);
	smu_data->toc = NULL;
	return r;
}

/* Check if the FW has been loaded; the SMU will not return if loading has not finished. */
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t ret;

	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					fw_type, fw_type);
	return ret;
}
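/*
 * Editor's illustrative sketch (not part of the original driver): waiting for
 * one particular engine's firmware after a load request. The mask is the same
 * constant already summed into fw_to_load in smu7_request_smu_load_fw() above.
 */
static int __maybe_unused smu7_example_wait_for_mec(struct pp_hwmgr *hwmgr)
{
	/* UcodeLoadStatus must report the CP_MEC bit before MEC can be used */
	return smu7_check_fw_load_finish(hwmgr, UCODE_ID_CP_MEC_MASK);
}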

int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}

static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
	uint32_t byte_count = length;

	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);

	for (; byte_count >= 4; byte_count -= 4)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);

	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);

	return 0;
}
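/*
 * Editor's note (not part of the original driver): with AUTO_INCREMENT_IND_11
 * set, the data register can be written back to back and the SMC advances the
 * internal SRAM address after each access, so the index register (programmed
 * once to 0x20000, the load address this driver uses for the SMC firmware
 * image) does not have to be rewritten for every dword of the upload.
 */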


int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	struct cgs_firmware_info info = {0};

	if (smu_data->security_hard_key == 1)
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
	else
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

	hwmgr->is_kicker = info.is_kicker;
	hwmgr->smu_version = info.version;
	result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

	return result;
}

static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
{
	int i;
	uint32_t reg, data;

	for (i = 0; i < size; i++) {
		reg  = pvirus->reg;
		data = pvirus->data;
		if (reg != 0xffffffff)
			cgs_write_register(hwmgr->device, reg, data);
		else
			break;
		pvirus++;
	}
}

static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
{
	int i;

	cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
	for (i = 0; i < section->dfy_size; i++)
		cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}

int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
	execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
	execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));

	return 0;
}

int smu7_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data;
	int r;
	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

/* Allocate FW image data structure and header buffer and
 * send the header buffer address to SMU */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->header_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->header_buffer.handle,
		&smu_data->header_buffer.mc_addr,
		&smu_data->header_buffer.kaddr);

	if (r)
		return -EINVAL;

	if (!hwmgr->not_vf)
		return 0;

	smu_data->smu_buffer.data_size = 200*4096;
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->smu_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->smu_buffer.handle,
		&smu_data->smu_buffer.mc_addr,
		&smu_data->smu_buffer.kaddr);

	if (r) {
		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);
		return -EINVAL;
	}

	if (smum_is_hw_avfs_present(hwmgr) &&
	    (hwmgr->feature_mask & PP_AVFS_MASK))
		hwmgr->avfs_supported = true;

	return 0;
}


int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);

	if (hwmgr->not_vf)
		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
					&smu_data->smu_buffer.mc_addr,
					&smu_data->smu_buffer.kaddr);


	kfree(smu_data->toc);
	smu_data->toc = NULL;
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	return 0;
}