cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

soc21.c (20684B)


      1/*
      2 * Copyright 2021 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23#include <linux/firmware.h>
     24#include <linux/slab.h>
     25#include <linux/module.h>
     26#include <linux/pci.h>
     27
     28#include "amdgpu.h"
     29#include "amdgpu_atombios.h"
     30#include "amdgpu_ih.h"
     31#include "amdgpu_uvd.h"
     32#include "amdgpu_vce.h"
     33#include "amdgpu_ucode.h"
     34#include "amdgpu_psp.h"
     35#include "amdgpu_smu.h"
     36#include "atom.h"
     37#include "amd_pcie.h"
     38
     39#include "gc/gc_11_0_0_offset.h"
     40#include "gc/gc_11_0_0_sh_mask.h"
     41#include "mp/mp_13_0_0_offset.h"
     42
     43#include "soc15.h"
     44#include "soc15_common.h"
     45#include "soc21.h"
     46
     47static const struct amd_ip_funcs soc21_common_ip_funcs;
     48
     49/* SOC21 */
     50static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
     51{
     52	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
     53	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
     54};
     55
/* Wrapper handed back to userspace queries for the encode capability table. */
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
	.codec_array = vcn_4_0_0_video_codecs_encode_array,
};
     61
     62static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
     63{
     64	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
     65	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
     66	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
     67	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
     68	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
     69};
     70
/* Wrapper handed back to userspace queries for the decode capability table. */
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
	.codec_array = vcn_4_0_0_video_codecs_decode_array,
};
     76
     77static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
     78				 const struct amdgpu_video_codecs **codecs)
     79{
     80	switch (adev->ip_versions[UVD_HWIP][0]) {
     81
     82	case IP_VERSION(4, 0, 0):
     83		if (encode)
     84			*codecs = &vcn_4_0_0_video_codecs_encode;
     85		else
     86			*codecs = &vcn_4_0_0_video_codecs_decode;
     87		return 0;
     88	default:
     89		return -EINVAL;
     90	}
     91}
     92/*
     93 * Indirect registers accessor
     94 */
     95static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
     96{
     97	unsigned long address, data;
     98	address = adev->nbio.funcs->get_pcie_index_offset(adev);
     99	data = adev->nbio.funcs->get_pcie_data_offset(adev);
    100
    101	return amdgpu_device_indirect_rreg(adev, address, data, reg);
    102}
    103
    104static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
    105{
    106	unsigned long address, data;
    107
    108	address = adev->nbio.funcs->get_pcie_index_offset(adev);
    109	data = adev->nbio.funcs->get_pcie_data_offset(adev);
    110
    111	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
    112}
    113
    114static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
    115{
    116	unsigned long address, data;
    117	address = adev->nbio.funcs->get_pcie_index_offset(adev);
    118	data = adev->nbio.funcs->get_pcie_data_offset(adev);
    119
    120	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
    121}
    122
    123static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
    124{
    125	unsigned long address, data;
    126
    127	address = adev->nbio.funcs->get_pcie_index_offset(adev);
    128	data = adev->nbio.funcs->get_pcie_data_offset(adev);
    129
    130	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
    131}
    132
    133static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
    134{
    135	unsigned long flags, address, data;
    136	u32 r;
    137
    138	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
    139	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
    140
    141	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    142	WREG32(address, (reg));
    143	r = RREG32(data);
    144	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    145	return r;
    146}
    147
    148static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
    149{
    150	unsigned long flags, address, data;
    151
    152	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
    153	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
    154
    155	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    156	WREG32(address, (reg));
    157	WREG32(data, (v));
    158	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    159}
    160
/* Return the configured memory size as reported by the NBIO block. */
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
    165
/* Return the SPLL reference clock frequency (xclk). */
static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
    170
    171
    172void soc21_grbm_select(struct amdgpu_device *adev,
    173		     u32 me, u32 pipe, u32 queue, u32 vmid)
    174{
    175	u32 grbm_gfx_cntl = 0;
    176	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
    177	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
    178	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
    179	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
    180
    181	WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), grbm_gfx_cntl);
    182}
    183
/* Stub: VGA state control is not implemented for SOC21 yet. */
static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
    188
/* Stub: reading the BIOS while the ASIC is disabled is not implemented;
 * always reports failure so callers fall back to other BIOS sources.
 */
static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
    194
/* Whitelist of registers userspace may read via soc21_read_register().
 * Anything not listed here is rejected with -EINVAL.
 */
static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};
    216
/*
 * Read a register under a specific SE/SH selection.
 *
 * 0xffffffff for se_num/sh_num means "broadcast / no specific instance";
 * only when a specific instance is requested do we program the GRBM
 * selection, and we always restore broadcast mode before releasing
 * grbm_idx_mutex so other readers see a neutral selection.
 */
static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	/* restore broadcast selection */
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
    233
    234static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
    235				      bool indexed, u32 se_num,
    236				      u32 sh_num, u32 reg_offset)
    237{
    238	if (indexed) {
    239		return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
    240	} else {
    241		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) && adev->gfx.config.gb_addr_config)
    242			return adev->gfx.config.gb_addr_config;
    243		return RREG32(reg_offset);
    244	}
    245}
    246
/*
 * Userspace-facing register read: only offsets present in
 * soc21_allowed_read_registers are honoured.
 *
 * Returns 0 with *value filled on a whitelisted offset, -EINVAL
 * otherwise (with *value left at 0).
 */
static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry  *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
		en = &soc21_allowed_read_registers[i];
		/* skip entries whose IP block has a register map but whose
		 * computed offset does not match the requested one
		 */
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc21_get_register_value(adev,
					       soc21_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
    268
#if 0
/* Compiled out: mode1 reset path kept for reference; the live path goes
 * through amdgpu_device_mode1_reset() in soc21_asic_reset().
 */
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	/* prefer the SMU reset when dpm supports it, else fall back to PSP */
	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		/* memsize reads back all-ones while the ASIC is in reset */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif
    308
    309static enum amd_reset_method
    310soc21_asic_reset_method(struct amdgpu_device *adev)
    311{
    312	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
    313	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
    314		return amdgpu_reset_method;
    315
    316	if (amdgpu_reset_method != -1)
    317		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
    318				  amdgpu_reset_method);
    319
    320	switch (adev->ip_versions[MP1_HWIP][0]) {
    321	case IP_VERSION(13, 0, 0):
    322		return AMD_RESET_METHOD_MODE1;
    323	default:
    324		if (amdgpu_dpm_is_baco_supported(adev))
    325			return AMD_RESET_METHOD_BACO;
    326		else
    327			return AMD_RESET_METHOD_MODE1;
    328	}
    329}
    330
    331static int soc21_asic_reset(struct amdgpu_device *adev)
    332{
    333	int ret = 0;
    334
    335	switch (soc21_asic_reset_method(adev)) {
    336	case AMD_RESET_METHOD_PCI:
    337		dev_info(adev->dev, "PCI reset\n");
    338		ret = amdgpu_device_pci_reset(adev);
    339		break;
    340	case AMD_RESET_METHOD_BACO:
    341		dev_info(adev->dev, "BACO reset\n");
    342		ret = amdgpu_dpm_baco_reset(adev);
    343		break;
    344	default:
    345		dev_info(adev->dev, "MODE1 reset\n");
    346		ret = amdgpu_device_mode1_reset(adev);
    347		break;
    348	}
    349
    350	return ret;
    351}
    352
/* Stub: UVD/VCN clock programming not implemented; reports success. */
static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
    358
/* Stub: VCE clock programming not implemented; reports success. */
static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
    364
    365static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
    366{
    367	if (pci_is_root_bus(adev->pdev->bus))
    368		return;
    369
    370	if (amdgpu_pcie_gen2 == 0)
    371		return;
    372
    373	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
    374					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
    375		return;
    376
    377	/* todo */
    378}
    379
/* Stub: ASPM programming not implemented; honours the amdgpu_aspm
 * module parameter as an early-out.
 */
static void soc21_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
    388
/* Enable/disable both the doorbell aperture and its self-ring aperture
 * via the NBIO callbacks.
 */
static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
    395
/* IP block descriptor registered with the amdgpu core for SOC21 common. */
const struct amdgpu_ip_block_version soc21_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc21_common_ip_funcs,
};
    404
/* Return the ASIC revision id as reported by the NBIO block. */
static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
    409
/* SOC21 always requires a full reset on recovery. */
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
    414
    415static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
    416{
    417	u32 sol_reg;
    418
    419	if (adev->flags & AMD_IS_APU)
    420		return false;
    421
    422	/* Check sOS sign of life register to confirm sys driver and sOS
    423	 * are already been loaded.
    424	 */
    425	sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
    426	if (sol_reg)
    427		return true;
    428
    429	return false;
    430}
    431
/* Stub backing the pcie_replay_count sysfs attribute; always reports 0
 * until a real implementation lands.
 */
static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implement for pcie_replay_count sysfs interface
	 * */

	return 0;
}
    441
/* Assign the doorbell index layout for SOC21; the map reuses the
 * NAVI10 doorbell assignments.
 */
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* shifted left by one: doorbells are 64-bit (two 32-bit slots) */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
    476
/* Hook invoked before ASIC init; nothing to do on SOC21. */
static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
    480
/* ASIC-level callback table wired into adev->asic_funcs at early init. */
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
	.read_disabled_bios = &soc21_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc21_read_register,
	.reset = &soc21_asic_reset,
	.reset_method = &soc21_asic_reset_method,
	.set_vga_state = &soc21_vga_set_state,
	.get_xclk = &soc21_get_xclk,
	.set_uvd_clocks = &soc21_set_uvd_clocks,
	.set_vce_clocks = &soc21_set_vce_clocks,
	.get_config_memsize = &soc21_get_config_memsize,
	.init_doorbell_index = &soc21_init_doorbell_index,
	.need_full_reset = &soc21_need_full_reset,
	.need_reset_on_init = &soc21_need_reset_on_init,
	.get_pcie_replay_count = &soc21_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &soc21_pre_asic_init,
	.query_video_codecs = &soc21_query_video_codecs,
};
    501
/*
 * Early init for the SOC21 common IP block: wires up the register
 * accessor callbacks, installs the ASIC function table, then sets the
 * clock-/power-gating support flags and external revision id per
 * GC IP version. Returns -EINVAL for unsupported GC versions.
 */
static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* remap window sits just below the 512KB MMIO hole */
	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc21_pcie_rreg;
	adev->pcie_wreg = &soc21_pcie_wreg;
	adev->pcie_rreg64 = &soc21_pcie_rreg64;
	adev->pcie_wreg64 = &soc21_pcie_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &soc21_didt_rreg;
	adev->didt_wreg = &soc21_didt_wreg;

	adev->asic_funcs = &soc21_asic_funcs;

	adev->rev_id = soc21_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
		break;
	case IP_VERSION(11, 0, 2):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
    580
/* No late-init work needed for the common block. */
static int soc21_common_late_init(void *handle)
{
	return 0;
}
    585
/* No software-side setup needed for the common block. */
static int soc21_common_sw_init(void *handle)
{
	return 0;
}
    590
/* No software-side teardown needed for the common block. */
static int soc21_common_sw_fini(void *handle)
{
	return 0;
}
    595
/*
 * Hardware init for the common block: bring up the PCIe link, ASPM and
 * NBIO registers, then expose HDP registers and the doorbell aperture.
 * Order matters: NBIO registers must be programmed before doorbells
 * are enabled.
 */
static int soc21_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc21_pcie_gen3_enable(adev);
	/* enable aspm */
	soc21_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, true);

	return 0;
}
    617
/* Hardware teardown: only the doorbell aperture needs disabling. */
static int soc21_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, false);

	return 0;
}
    627
/* Suspend delegates to hw_fini (adev doubles as the opaque handle). */
static int soc21_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_fini(adev);
}
    634
/* Resume delegates to hw_init (adev doubles as the opaque handle). */
static int soc21_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_init(adev);
}
    641
/* The common block has no busy state; always idle. */
static bool soc21_common_is_idle(void *handle)
{
	return true;
}
    646
/* Nothing to wait for; the common block is always idle. */
static int soc21_common_wait_for_idle(void *handle)
{
	return 0;
}
    651
/* Soft reset is a no-op for the common block. */
static int soc21_common_soft_reset(void *handle)
{
	return 0;
}
    656
/*
 * Propagate a clockgating state change to the NBIO and HDP blocks.
 * Only NBIO 4.3.0 is handled; other versions silently succeed.
 */
static int soc21_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
    676
/*
 * Propagate a powergating state change to the LSDMA block.
 * Only LSDMA 6.0.0/6.0.2 are handled; other versions silently succeed.
 */
static int soc21_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		adev->lsdma.funcs->update_memory_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}
    694
    695static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
    696{
    697	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    698
    699	adev->nbio.funcs->get_clockgating_state(adev, flags);
    700
    701	adev->hdp.funcs->get_clock_gating_state(adev, flags);
    702
    703	return;
    704}
    705
/* IP-function table for the SOC21 common block, referenced by
 * soc21_common_ip_block above.
 */
static const struct amd_ip_funcs soc21_common_ip_funcs = {
	.name = "soc21_common",
	.early_init = soc21_common_early_init,
	.late_init = soc21_common_late_init,
	.sw_init = soc21_common_sw_init,
	.sw_fini = soc21_common_sw_fini,
	.hw_init = soc21_common_hw_init,
	.hw_fini = soc21_common_hw_fini,
	.suspend = soc21_common_suspend,
	.resume = soc21_common_resume,
	.is_idle = soc21_common_is_idle,
	.wait_for_idle = soc21_common_wait_for_idle,
	.soft_reset = soc21_common_soft_reset,
	.set_clockgating_state = soc21_common_set_clockgating_state,
	.set_powergating_state = soc21_common_set_powergating_state,
	.get_clockgating_state = soc21_common_get_clockgating_state,
};