cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vi.c (61825B)


/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "amdgpu_vkms.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

#define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK	0x00000004L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK		0x00000008L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK		0x00000010L
#define ixPCIE_L1_PM_SUB_CNTL	0x378
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK	0x00000004L
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK	0x00000008L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK	0x00000001L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK	0x00000002L
#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK		0x00200000L
#define LINK_CAP	0x64
#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK	0x00040000L
#define ixCPM_CONTROL	0x1400118
#define ixPCIE_LC_CNTL7	0x100100BC
#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK	0x00000400L
#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT	0x00000007
#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT	0x00000009
#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK	0x01000000L
#define PCIE_L1_PM_SUB_CNTL	0x378
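/*
 * Presumably the later "P22" revision of the Polaris family: any
 * Polaris10/11/12 part with an external revision ID of 0x6E or above.
 * Used in vi_program_aspm() below to gate a PCIe link-training workaround.
 */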
#define ASIC_IS_P22(asic_type, rid)	((asic_type >= CHIP_POLARIS10) && \
					 (asic_type <= CHIP_POLARIS12) && \
					 (rid >= 0x6E))
/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};

/* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};

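/*
 * max_level appears to use each codec's native level scale, e.g. 52 for
 * H.264 level 5.2 and 186 (30 * 6.2) for HEVC level 6.2.
 */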
/* CZ, ST, Fiji, Polaris */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};

static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		if (encode)
			*codecs = &topaz_video_codecs_encode;
		else
			*codecs = &topaz_video_codecs_decode;
		return 0;
	case CHIP_TONGA:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &tonga_video_codecs_decode;
		return 0;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (encode)
			*codecs = &polaris_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors
 */
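/*
 * Each accessor writes the register offset into an INDEX register and then
 * moves the value through the companion DATA register, under a spinlock so
 * the index/data pair stays atomic.  The PCIe accessors additionally read
 * the index register back, which flushes the posted index write before the
 * data access.
 */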
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

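/*
 * Golden register tables: triplets of { register, AND mask, OR value }.
 * amdgpu_device_program_register_sequence() clears the bits in the AND mask
 * and ORs in the new value for each entry.
 */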
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

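/*
 * Registers user space is allowed to read back (e.g. through the
 * AMDGPU_INFO ioctl's MMR read query).  Entries marked true are
 * GRBM-indexed and honour the caller's SE/SH selection.
 */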
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

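/*
 * The RB/raster-config registers and the tile-mode tables are returned from
 * the values cached in adev->gfx.config at init time; anything else is read
 * straight from the hardware, selecting the requested SE/SH instance first
 * when one is given.
 */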
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	/* APUs don't have full asic reset */
	if (adev->flags & AMD_IS_APU)
		return 0;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

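/*
 * On APUs the UVD (VCLK/DCLK) and VCE (ECLK) dividers sit behind the GNB
 * DFS registers defined above; discrete parts use the CG_*CLK_CNTL/_STATUS
 * SMC registers instead.
 */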
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_enable_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
			PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
			PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);
}

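/*
 * ASPM with these parts is reported to be problematic on Intel Alder Lake
 * hosts, so vi_program_aspm() backs off there.
 */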
static bool aspm_support_quirk_check(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
	return true;
#endif
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	u32 data, data1, orig;
	bool bL1SS = false;
	bool bClkReqSupport = true;

	if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
		return;

	if (adev->flags & AMD_IS_APU ||
	    adev->asic_type < CHIP_POLARIS10)
		return;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
	data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
	data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_P_CNTL, data);

	data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
	pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
	if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
	    (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
		bL1SS = true;
	} else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
		bL1SS = true;
	}

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL6, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
	data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);

	pci_read_config_dword(adev->pdev, LINK_CAP, &data);
	if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
		bClkReqSupport = false;

	if (bClkReqSupport) {
		orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
		data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
		data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
				(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixTHM_CLK_CNTL, data);

		orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
		data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
			MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
		data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
				(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
		data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMISC_CLK_CTRL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
		data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
		data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);

		orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
		data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
		data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);

		orig = data = RREG32_PCIE(ixCPM_CONTROL);
		data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
				CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
		if (orig != data)
			WREG32_PCIE(ixCPM_CONTROL, data);

		orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
		data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
		data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
		if (orig != data)
			WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);

		orig = data = RREG32(mmBIF_CLK_CTRL);
		data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
		if (orig != data)
			WREG32(mmBIF_CLK_CTRL, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
		data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL7, data);

		orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
		data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_HW_DEBUG, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (bL1SS)
			data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL2, data);
	}

	vi_enable_aspm(adev);

	data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
	if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);
	}

	if ((adev->asic_type == CHIP_POLARIS12 &&
	    !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
	    ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
		orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
		data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
	}
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

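/*
 * HDP flush/invalidate make CPU writes through the host data path visible
 * to the GPU and vice versa.  When a ring that can emit register writes is
 * available, the write is emitted on the ring so it is ordered with the
 * submitted commands; otherwise plain MMIO is used.
 */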
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
	.query_video_codecs = &vi_query_video_codecs,
};

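/* Bristol Ridge, a Carrizo refresh, is told apart by its PCI revision ID. */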
   1488#define CZ_REV_BRISTOL(rev)	 \
   1489	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
   1490
   1491static int vi_common_early_init(void *handle)
   1492{
   1493	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1494
   1495	if (adev->flags & AMD_IS_APU) {
   1496		adev->smc_rreg = &cz_smc_rreg;
   1497		adev->smc_wreg = &cz_smc_wreg;
   1498	} else {
   1499		adev->smc_rreg = &vi_smc_rreg;
   1500		adev->smc_wreg = &vi_smc_wreg;
   1501	}
   1502	adev->pcie_rreg = &vi_pcie_rreg;
   1503	adev->pcie_wreg = &vi_pcie_wreg;
   1504	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
   1505	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
   1506	adev->didt_rreg = &vi_didt_rreg;
   1507	adev->didt_wreg = &vi_didt_wreg;
   1508	adev->gc_cac_rreg = &vi_gc_cac_rreg;
   1509	adev->gc_cac_wreg = &vi_gc_cac_wreg;
   1510
   1511	adev->asic_funcs = &vi_asic_funcs;
   1512
   1513	adev->rev_id = vi_get_rev_id(adev);
   1514	adev->external_rev_id = 0xFF;
   1515	switch (adev->asic_type) {
   1516	case CHIP_TOPAZ:
   1517		adev->cg_flags = 0;
   1518		adev->pg_flags = 0;
   1519		adev->external_rev_id = 0x1;
   1520		break;
   1521	case CHIP_FIJI:
   1522		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
   1523			AMD_CG_SUPPORT_GFX_MGLS |
   1524			AMD_CG_SUPPORT_GFX_RLC_LS |
   1525			AMD_CG_SUPPORT_GFX_CP_LS |
   1526			AMD_CG_SUPPORT_GFX_CGTS |
   1527			AMD_CG_SUPPORT_GFX_CGTS_LS |
   1528			AMD_CG_SUPPORT_GFX_CGCG |
   1529			AMD_CG_SUPPORT_GFX_CGLS |
   1530			AMD_CG_SUPPORT_SDMA_MGCG |
   1531			AMD_CG_SUPPORT_SDMA_LS |
   1532			AMD_CG_SUPPORT_BIF_LS |
   1533			AMD_CG_SUPPORT_HDP_MGCG |
   1534			AMD_CG_SUPPORT_HDP_LS |
   1535			AMD_CG_SUPPORT_ROM_MGCG |
   1536			AMD_CG_SUPPORT_MC_MGCG |
   1537			AMD_CG_SUPPORT_MC_LS |
   1538			AMD_CG_SUPPORT_UVD_MGCG;
   1539		adev->pg_flags = 0;
   1540		adev->external_rev_id = adev->rev_id + 0x3c;
   1541		break;
   1542	case CHIP_TONGA:
   1543		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
   1544			AMD_CG_SUPPORT_GFX_CGCG |
   1545			AMD_CG_SUPPORT_GFX_CGLS |
   1546			AMD_CG_SUPPORT_SDMA_MGCG |
   1547			AMD_CG_SUPPORT_SDMA_LS |
   1548			AMD_CG_SUPPORT_BIF_LS |
   1549			AMD_CG_SUPPORT_HDP_MGCG |
   1550			AMD_CG_SUPPORT_HDP_LS |
   1551			AMD_CG_SUPPORT_ROM_MGCG |
   1552			AMD_CG_SUPPORT_MC_MGCG |
   1553			AMD_CG_SUPPORT_MC_LS |
   1554			AMD_CG_SUPPORT_DRM_LS |
   1555			AMD_CG_SUPPORT_UVD_MGCG;
   1556		adev->pg_flags = 0;
   1557		adev->external_rev_id = adev->rev_id + 0x14;
   1558		break;
   1559	case CHIP_POLARIS11:
   1560		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
   1561			AMD_CG_SUPPORT_GFX_RLC_LS |
   1562			AMD_CG_SUPPORT_GFX_CP_LS |
   1563			AMD_CG_SUPPORT_GFX_CGCG |
   1564			AMD_CG_SUPPORT_GFX_CGLS |
   1565			AMD_CG_SUPPORT_GFX_3D_CGCG |
   1566			AMD_CG_SUPPORT_GFX_3D_CGLS |
   1567			AMD_CG_SUPPORT_SDMA_MGCG |
   1568			AMD_CG_SUPPORT_SDMA_LS |
   1569			AMD_CG_SUPPORT_BIF_MGCG |
   1570			AMD_CG_SUPPORT_BIF_LS |
   1571			AMD_CG_SUPPORT_HDP_MGCG |
   1572			AMD_CG_SUPPORT_HDP_LS |
   1573			AMD_CG_SUPPORT_ROM_MGCG |
   1574			AMD_CG_SUPPORT_MC_MGCG |
   1575			AMD_CG_SUPPORT_MC_LS |
   1576			AMD_CG_SUPPORT_DRM_LS |
   1577			AMD_CG_SUPPORT_UVD_MGCG |
   1578			AMD_CG_SUPPORT_VCE_MGCG;
   1579		adev->pg_flags = 0;
   1580		adev->external_rev_id = adev->rev_id + 0x5A;
   1581		break;
   1582	case CHIP_POLARIS10:
   1583		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
   1584			AMD_CG_SUPPORT_GFX_RLC_LS |
   1585			AMD_CG_SUPPORT_GFX_CP_LS |
   1586			AMD_CG_SUPPORT_GFX_CGCG |
   1587			AMD_CG_SUPPORT_GFX_CGLS |
   1588			AMD_CG_SUPPORT_GFX_3D_CGCG |
   1589			AMD_CG_SUPPORT_GFX_3D_CGLS |
   1590			AMD_CG_SUPPORT_SDMA_MGCG |
   1591			AMD_CG_SUPPORT_SDMA_LS |
   1592			AMD_CG_SUPPORT_BIF_MGCG |
   1593			AMD_CG_SUPPORT_BIF_LS |
   1594			AMD_CG_SUPPORT_HDP_MGCG |
   1595			AMD_CG_SUPPORT_HDP_LS |
   1596			AMD_CG_SUPPORT_ROM_MGCG |
   1597			AMD_CG_SUPPORT_MC_MGCG |
   1598			AMD_CG_SUPPORT_MC_LS |
   1599			AMD_CG_SUPPORT_DRM_LS |
   1600			AMD_CG_SUPPORT_UVD_MGCG |
   1601			AMD_CG_SUPPORT_VCE_MGCG;
   1602		adev->pg_flags = 0;
   1603		adev->external_rev_id = adev->rev_id + 0x50;
   1604		break;
   1605	case CHIP_POLARIS12:
   1606		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
   1607			AMD_CG_SUPPORT_GFX_RLC_LS |
   1608			AMD_CG_SUPPORT_GFX_CP_LS |
   1609			AMD_CG_SUPPORT_GFX_CGCG |
   1610			AMD_CG_SUPPORT_GFX_CGLS |
   1611			AMD_CG_SUPPORT_GFX_3D_CGCG |
   1612			AMD_CG_SUPPORT_GFX_3D_CGLS |
   1613			AMD_CG_SUPPORT_SDMA_MGCG |
   1614			AMD_CG_SUPPORT_SDMA_LS |
   1615			AMD_CG_SUPPORT_BIF_MGCG |
   1616			AMD_CG_SUPPORT_BIF_LS |
   1617			AMD_CG_SUPPORT_HDP_MGCG |
   1618			AMD_CG_SUPPORT_HDP_LS |
   1619			AMD_CG_SUPPORT_ROM_MGCG |
   1620			AMD_CG_SUPPORT_MC_MGCG |
   1621			AMD_CG_SUPPORT_MC_LS |
   1622			AMD_CG_SUPPORT_DRM_LS |
   1623			AMD_CG_SUPPORT_UVD_MGCG |
   1624			AMD_CG_SUPPORT_VCE_MGCG;
   1625		adev->pg_flags = 0;
   1626		adev->external_rev_id = adev->rev_id + 0x64;
   1627		break;
   1628	case CHIP_VEGAM:
   1629		adev->cg_flags = 0;
   1630			/*AMD_CG_SUPPORT_GFX_MGCG |
   1631			AMD_CG_SUPPORT_GFX_RLC_LS |
   1632			AMD_CG_SUPPORT_GFX_CP_LS |
   1633			AMD_CG_SUPPORT_GFX_CGCG |
   1634			AMD_CG_SUPPORT_GFX_CGLS |
   1635			AMD_CG_SUPPORT_GFX_3D_CGCG |
   1636			AMD_CG_SUPPORT_GFX_3D_CGLS |
   1637			AMD_CG_SUPPORT_SDMA_MGCG |
   1638			AMD_CG_SUPPORT_SDMA_LS |
   1639			AMD_CG_SUPPORT_BIF_MGCG |
   1640			AMD_CG_SUPPORT_BIF_LS |
   1641			AMD_CG_SUPPORT_HDP_MGCG |
   1642			AMD_CG_SUPPORT_HDP_LS |
   1643			AMD_CG_SUPPORT_ROM_MGCG |
   1644			AMD_CG_SUPPORT_MC_MGCG |
   1645			AMD_CG_SUPPORT_MC_LS |
   1646			AMD_CG_SUPPORT_DRM_LS |
   1647			AMD_CG_SUPPORT_UVD_MGCG |
   1648			AMD_CG_SUPPORT_VCE_MGCG;*/
   1649		adev->pg_flags = 0;
   1650		adev->external_rev_id = adev->rev_id + 0x6E;
   1651		break;
   1652	case CHIP_CARRIZO:
   1653		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
   1654			AMD_CG_SUPPORT_GFX_MGCG |
   1655			AMD_CG_SUPPORT_GFX_MGLS |
   1656			AMD_CG_SUPPORT_GFX_RLC_LS |
   1657			AMD_CG_SUPPORT_GFX_CP_LS |
   1658			AMD_CG_SUPPORT_GFX_CGTS |
   1659			AMD_CG_SUPPORT_GFX_CGTS_LS |
   1660			AMD_CG_SUPPORT_GFX_CGCG |
   1661			AMD_CG_SUPPORT_GFX_CGLS |
   1662			AMD_CG_SUPPORT_BIF_LS |
   1663			AMD_CG_SUPPORT_HDP_MGCG |
   1664			AMD_CG_SUPPORT_HDP_LS |
   1665			AMD_CG_SUPPORT_SDMA_MGCG |
   1666			AMD_CG_SUPPORT_SDMA_LS |
   1667			AMD_CG_SUPPORT_VCE_MGCG;
   1668		/* rev0 hardware requires workarounds to support PG */
   1669		adev->pg_flags = 0;
   1670		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
   1671			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
   1672				AMD_PG_SUPPORT_GFX_PIPELINE |
   1673				AMD_PG_SUPPORT_CP |
   1674				AMD_PG_SUPPORT_UVD |
   1675				AMD_PG_SUPPORT_VCE;
   1676		}
   1677		adev->external_rev_id = adev->rev_id + 0x1;
   1678		break;
   1679	case CHIP_STONEY:
   1680		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
   1681			AMD_CG_SUPPORT_GFX_MGCG |
   1682			AMD_CG_SUPPORT_GFX_MGLS |
   1683			AMD_CG_SUPPORT_GFX_RLC_LS |
   1684			AMD_CG_SUPPORT_GFX_CP_LS |
   1685			AMD_CG_SUPPORT_GFX_CGTS |
   1686			AMD_CG_SUPPORT_GFX_CGTS_LS |
   1687			AMD_CG_SUPPORT_GFX_CGLS |
   1688			AMD_CG_SUPPORT_BIF_LS |
   1689			AMD_CG_SUPPORT_HDP_MGCG |
   1690			AMD_CG_SUPPORT_HDP_LS |
   1691			AMD_CG_SUPPORT_SDMA_MGCG |
   1692			AMD_CG_SUPPORT_SDMA_LS |
   1693			AMD_CG_SUPPORT_VCE_MGCG;
   1694		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
   1695			AMD_PG_SUPPORT_GFX_SMG |
   1696			AMD_PG_SUPPORT_GFX_PIPELINE |
   1697			AMD_PG_SUPPORT_CP |
   1698			AMD_PG_SUPPORT_UVD |
   1699			AMD_PG_SUPPORT_VCE;
   1700		adev->external_rev_id = adev->rev_id + 0x61;
   1701		break;
   1702	default:
   1703		/* FIXME: not supported yet */
   1704		return -EINVAL;
   1705	}
   1706
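	/*
	 * When running as an SR-IOV virtual function the host owns most of
	 * the hardware: pull in the VF-specific settings and register the
	 * mailbox IRQ callbacks used for host/guest handshaking.
	 */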
   1707	if (amdgpu_sriov_vf(adev)) {
   1708		amdgpu_virt_init_setting(adev);
   1709		xgpu_vi_mailbox_set_irq_funcs(adev);
   1710	}
   1711
   1712	return 0;
   1713}
   1714
   1715static int vi_common_late_init(void *handle)
   1716{
   1717	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1718
   1719	if (amdgpu_sriov_vf(adev))
   1720		xgpu_vi_mailbox_get_irq(adev);
   1721
   1722	return 0;
   1723}
   1724
   1725static int vi_common_sw_init(void *handle)
   1726{
   1727	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1728
   1729	if (amdgpu_sriov_vf(adev))
   1730		xgpu_vi_mailbox_add_irq_id(adev);
   1731
   1732	return 0;
   1733}
   1734
   1735static int vi_common_sw_fini(void *handle)
   1736{
   1737	return 0;
   1738}
   1739
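/*
 * Note that vi_common_suspend()/vi_common_resume() below simply reuse
 * these hw_fini/hw_init hooks, so everything programmed here is redone
 * after a power transition.
 */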
   1740static int vi_common_hw_init(void *handle)
   1741{
   1742	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1743
    1744	/* program the golden registers (TODO: move into the per-IP blocks) */
   1745	vi_init_golden_registers(adev);
    1746	/* enable the PCIe gen2/3 link */
    1747	vi_pcie_gen3_enable(adev);
    1748	/* enable ASPM */
   1749	vi_program_aspm(adev);
   1750	/* enable the doorbell aperture */
   1751	vi_enable_doorbell_aperture(adev, true);
   1752
   1753	return 0;
   1754}
   1755
   1756static int vi_common_hw_fini(void *handle)
   1757{
   1758	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1759
    1760	/* disable the doorbell aperture */
   1761	vi_enable_doorbell_aperture(adev, false);
   1762
   1763	if (amdgpu_sriov_vf(adev))
   1764		xgpu_vi_mailbox_put_irq(adev);
   1765
   1766	return 0;
   1767}
   1768
   1769static int vi_common_suspend(void *handle)
   1770{
   1771	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1772
   1773	return vi_common_hw_fini(adev);
   1774}
   1775
   1776static int vi_common_resume(void *handle)
   1777{
   1778	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1779
   1780	return vi_common_hw_init(adev);
   1781}
   1782
   1783static bool vi_common_is_idle(void *handle)
   1784{
   1785	return true;
   1786}
   1787
   1788static int vi_common_wait_for_idle(void *handle)
   1789{
   1790	return 0;
   1791}
   1792
   1793static int vi_common_soft_reset(void *handle)
   1794{
   1795	return 0;
   1796}
   1797
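/*
 * The vi_update_*() helpers below share one read-modify-write pattern:
 * read the current register value, set or clear the gating bits depending
 * on whether the feature is both requested and advertised in
 * adev->cg_flags, then write the register back only if the value actually
 * changed, sparing redundant writes on the indirect PCIE/SMC buses.
 */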
   1798static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
   1799						   bool enable)
   1800{
   1801	uint32_t temp, data;
   1802
   1803	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
   1804
   1805	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
   1806		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
   1807				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
   1808				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
   1809	else
   1810		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
   1811				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
   1812				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
   1813
   1814	if (temp != data)
   1815		WREG32_PCIE(ixPCIE_CNTL2, data);
   1816}
   1817
   1818static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
   1819						    bool enable)
   1820{
   1821	uint32_t temp, data;
   1822
   1823	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
   1824
   1825	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
   1826		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
   1827	else
   1828		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
   1829
   1830	if (temp != data)
   1831		WREG32(mmHDP_HOST_PATH_CNTL, data);
   1832}
   1833
   1834static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
   1835				      bool enable)
   1836{
   1837	uint32_t temp, data;
   1838
   1839	temp = data = RREG32(mmHDP_MEM_POWER_LS);
   1840
   1841	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
   1842		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
   1843	else
   1844		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
   1845
   1846	if (temp != data)
   1847		WREG32(mmHDP_MEM_POWER_LS, data);
   1848}
   1849
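/*
 * Light sleep for the DRM block is toggled through raw offset 0x157a,
 * which appears to have no symbolic name in the VI register headers;
 * bit 0 is presumably the light-sleep enable.
 */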
   1850static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
   1851				      bool enable)
   1852{
   1853	uint32_t temp, data;
   1854
   1855	temp = data = RREG32(0x157a);
   1856
   1857	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
   1858		data |= 1;
   1859	else
   1860		data &= ~1;
   1861
   1862	if (temp != data)
   1863		WREG32(0x157a, data);
   1864}
    1865
   1867static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
   1868						    bool enable)
   1869{
   1870	uint32_t temp, data;
   1871
   1872	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
   1873
   1874	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
   1875		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
   1876				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
   1877	else
   1878		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
   1879				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
   1880
   1881	if (temp != data)
   1882		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
   1883}
   1884
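/*
 * On SMU-managed parts (Tonga, Polaris, VEGAM) the system blocks are not
 * gated through registers here; each block is toggled by sending a coded
 * request to the SMU. The per-block pattern repeated below is roughly:
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, <block>,
 *			      <supported LS/CG bits>,
 *			      gate ? <requested LS/CG bits> : 0);
 *	amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 */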
   1885static int vi_common_set_clockgating_state_by_smu(void *handle,
   1886					   enum amd_clockgating_state state)
   1887{
   1888	uint32_t msg_id, pp_state = 0;
   1889	uint32_t pp_support_state = 0;
   1890	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1891
   1892	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
   1893		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
   1894			pp_support_state = PP_STATE_SUPPORT_LS;
   1895			pp_state = PP_STATE_LS;
   1896		}
   1897		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
   1898			pp_support_state |= PP_STATE_SUPPORT_CG;
   1899			pp_state |= PP_STATE_CG;
   1900		}
   1901		if (state == AMD_CG_STATE_UNGATE)
   1902			pp_state = 0;
   1903		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1904			       PP_BLOCK_SYS_MC,
   1905			       pp_support_state,
   1906			       pp_state);
   1907		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1908	}
   1909
   1910	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
   1911		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
   1912			pp_support_state = PP_STATE_SUPPORT_LS;
   1913			pp_state = PP_STATE_LS;
   1914		}
   1915		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
   1916			pp_support_state |= PP_STATE_SUPPORT_CG;
   1917			pp_state |= PP_STATE_CG;
   1918		}
   1919		if (state == AMD_CG_STATE_UNGATE)
   1920			pp_state = 0;
   1921		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1922			       PP_BLOCK_SYS_SDMA,
   1923			       pp_support_state,
   1924			       pp_state);
   1925		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1926	}
   1927
   1928	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
   1929		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
   1930			pp_support_state = PP_STATE_SUPPORT_LS;
   1931			pp_state = PP_STATE_LS;
   1932		}
   1933		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
   1934			pp_support_state |= PP_STATE_SUPPORT_CG;
   1935			pp_state |= PP_STATE_CG;
   1936		}
   1937		if (state == AMD_CG_STATE_UNGATE)
   1938			pp_state = 0;
   1939		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1940			       PP_BLOCK_SYS_HDP,
   1941			       pp_support_state,
   1942			       pp_state);
   1943		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1944	}
    1945
   1947	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
   1948		if (state == AMD_CG_STATE_UNGATE)
   1949			pp_state = 0;
   1950		else
   1951			pp_state = PP_STATE_LS;
   1952
   1953		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1954			       PP_BLOCK_SYS_BIF,
   1955			       PP_STATE_SUPPORT_LS,
    1956			       pp_state);
   1957		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1958	}
   1959	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
   1960		if (state == AMD_CG_STATE_UNGATE)
   1961			pp_state = 0;
   1962		else
   1963			pp_state = PP_STATE_CG;
   1964
   1965		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1966			       PP_BLOCK_SYS_BIF,
   1967			       PP_STATE_SUPPORT_CG,
   1968			       pp_state);
   1969		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1970	}
   1971
   1972	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
   1974		if (state == AMD_CG_STATE_UNGATE)
   1975			pp_state = 0;
   1976		else
   1977			pp_state = PP_STATE_LS;
   1978
   1979		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1980			       PP_BLOCK_SYS_DRM,
   1981			       PP_STATE_SUPPORT_LS,
   1982			       pp_state);
   1983		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1984	}
   1985
   1986	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
   1988		if (state == AMD_CG_STATE_UNGATE)
   1989			pp_state = 0;
   1990		else
   1991			pp_state = PP_STATE_CG;
   1992
   1993		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
   1994			       PP_BLOCK_SYS_ROM,
   1995			       PP_STATE_SUPPORT_CG,
   1996			       pp_state);
   1997		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
   1998	}
   1999	return 0;
   2000}
   2001
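/*
 * Dispatch by ASIC family: Fiji, Carrizo and Stoney program the gating
 * registers directly through the helpers above, while Tonga, the Polaris
 * parts and VEGAM route everything through the SMU. Virtual functions
 * skip clock gating entirely, presumably because the host already owns
 * power management.
 */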
   2002static int vi_common_set_clockgating_state(void *handle,
   2003					   enum amd_clockgating_state state)
   2004{
   2005	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   2006
   2007	if (amdgpu_sriov_vf(adev))
   2008		return 0;
   2009
   2010	switch (adev->asic_type) {
   2011	case CHIP_FIJI:
   2012		vi_update_bif_medium_grain_light_sleep(adev,
   2013				state == AMD_CG_STATE_GATE);
   2014		vi_update_hdp_medium_grain_clock_gating(adev,
   2015				state == AMD_CG_STATE_GATE);
   2016		vi_update_hdp_light_sleep(adev,
   2017				state == AMD_CG_STATE_GATE);
   2018		vi_update_rom_medium_grain_clock_gating(adev,
   2019				state == AMD_CG_STATE_GATE);
   2020		break;
   2021	case CHIP_CARRIZO:
   2022	case CHIP_STONEY:
   2023		vi_update_bif_medium_grain_light_sleep(adev,
   2024				state == AMD_CG_STATE_GATE);
   2025		vi_update_hdp_medium_grain_clock_gating(adev,
   2026				state == AMD_CG_STATE_GATE);
   2027		vi_update_hdp_light_sleep(adev,
   2028				state == AMD_CG_STATE_GATE);
   2029		vi_update_drm_light_sleep(adev,
   2030				state == AMD_CG_STATE_GATE);
   2031		break;
   2032	case CHIP_TONGA:
   2033	case CHIP_POLARIS10:
   2034	case CHIP_POLARIS11:
   2035	case CHIP_POLARIS12:
   2036	case CHIP_VEGAM:
   2037		vi_common_set_clockgating_state_by_smu(adev, state);
   2038		break;
   2039	default:
   2040		break;
   2041	}
   2042	return 0;
   2043}
   2044
   2045static int vi_common_set_powergating_state(void *handle,
   2046					    enum amd_powergating_state state)
   2047{
   2048	return 0;
   2049}
   2050
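/*
 * Report the clock-gating state actually in effect by reading the
 * registers back rather than echoing adev->cg_flags; this is what backs
 * interfaces such as the pm debugfs clock-gating dump.
 */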
   2051static void vi_common_get_clockgating_state(void *handle, u64 *flags)
   2052{
   2053	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   2054	int data;
   2055
   2056	if (amdgpu_sriov_vf(adev))
   2057		*flags = 0;
   2058
   2059	/* AMD_CG_SUPPORT_BIF_LS */
   2060	data = RREG32_PCIE(ixPCIE_CNTL2);
   2061	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
   2062		*flags |= AMD_CG_SUPPORT_BIF_LS;
   2063
   2064	/* AMD_CG_SUPPORT_HDP_LS */
   2065	data = RREG32(mmHDP_MEM_POWER_LS);
   2066	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
   2067		*flags |= AMD_CG_SUPPORT_HDP_LS;
   2068
   2069	/* AMD_CG_SUPPORT_HDP_MGCG */
   2070	data = RREG32(mmHDP_HOST_PATH_CNTL);
   2071	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
   2072		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
   2073
   2074	/* AMD_CG_SUPPORT_ROM_MGCG */
   2075	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
   2076	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
   2077		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
   2078}
   2079
   2080static const struct amd_ip_funcs vi_common_ip_funcs = {
   2081	.name = "vi_common",
   2082	.early_init = vi_common_early_init,
   2083	.late_init = vi_common_late_init,
   2084	.sw_init = vi_common_sw_init,
   2085	.sw_fini = vi_common_sw_fini,
   2086	.hw_init = vi_common_hw_init,
   2087	.hw_fini = vi_common_hw_fini,
   2088	.suspend = vi_common_suspend,
   2089	.resume = vi_common_resume,
   2090	.is_idle = vi_common_is_idle,
   2091	.wait_for_idle = vi_common_wait_for_idle,
   2092	.soft_reset = vi_common_soft_reset,
   2093	.set_clockgating_state = vi_common_set_clockgating_state,
   2094	.set_powergating_state = vi_common_set_powergating_state,
   2095	.get_clockgating_state = vi_common_get_clockgating_state,
   2096};
   2097
   2098static const struct amdgpu_ip_block_version vi_common_ip_block =
   2099{
   2100	.type = AMD_IP_BLOCK_TYPE_COMMON,
   2101	.major = 1,
   2102	.minor = 0,
   2103	.rev = 0,
   2104	.funcs = &vi_common_ip_funcs,
   2105};
   2106
   2107void vi_set_virt_ops(struct amdgpu_device *adev)
   2108{
   2109	adev->virt.ops = &xgpu_vi_virt_ops;
   2110}
   2111
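/*
 * IP blocks are added in initialization order: common first, then the
 * memory controller, interrupt handler, GFX, SDMA and SMU, with display
 * (virtual KMS, DC or legacy DCE) and the UVD/VCE multimedia engines
 * last. Later blocks depend on the earlier ones being up, so the order
 * matters.
 */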
   2112int vi_set_ip_blocks(struct amdgpu_device *adev)
   2113{
   2114	switch (adev->asic_type) {
   2115	case CHIP_TOPAZ:
   2116		/* topaz has no DCE, UVD, VCE */
   2117		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2118		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
   2119		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
   2120		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
   2121		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
   2122		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2123		if (adev->enable_virtual_display)
   2124			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2125		break;
   2126	case CHIP_FIJI:
   2127		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2128		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
   2129		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
   2130		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
   2131		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
   2132		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2133		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
   2134			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2135#if defined(CONFIG_DRM_AMD_DC)
   2136		else if (amdgpu_device_has_dc_support(adev))
   2137			amdgpu_device_ip_block_add(adev, &dm_ip_block);
   2138#endif
   2139		else
   2140			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
   2141		if (!amdgpu_sriov_vf(adev)) {
   2142			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
   2143			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
   2144		}
   2145		break;
   2146	case CHIP_TONGA:
   2147		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2148		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
   2149		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
   2150		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
   2151		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
   2152		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2153		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
   2154			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2155#if defined(CONFIG_DRM_AMD_DC)
   2156		else if (amdgpu_device_has_dc_support(adev))
   2157			amdgpu_device_ip_block_add(adev, &dm_ip_block);
   2158#endif
   2159		else
   2160			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
   2161		if (!amdgpu_sriov_vf(adev)) {
   2162			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
   2163			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
   2164		}
   2165		break;
   2166	case CHIP_POLARIS10:
   2167	case CHIP_POLARIS11:
   2168	case CHIP_POLARIS12:
   2169	case CHIP_VEGAM:
   2170		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2171		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
   2172		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
   2173		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
   2174		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
   2175		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2176		if (adev->enable_virtual_display)
   2177			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2178#if defined(CONFIG_DRM_AMD_DC)
   2179		else if (amdgpu_device_has_dc_support(adev))
   2180			amdgpu_device_ip_block_add(adev, &dm_ip_block);
   2181#endif
   2182		else
   2183			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
   2184		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
   2185		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
   2186		break;
   2187	case CHIP_CARRIZO:
   2188		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2189		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
   2190		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
   2191		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
   2192		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
   2193		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2194		if (adev->enable_virtual_display)
   2195			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2196#if defined(CONFIG_DRM_AMD_DC)
   2197		else if (amdgpu_device_has_dc_support(adev))
   2198			amdgpu_device_ip_block_add(adev, &dm_ip_block);
   2199#endif
   2200		else
   2201			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
   2202		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
   2203		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
   2204#if defined(CONFIG_DRM_AMD_ACP)
   2205		amdgpu_device_ip_block_add(adev, &acp_ip_block);
   2206#endif
   2207		break;
   2208	case CHIP_STONEY:
   2209		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
   2210		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
   2211		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
   2212		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
   2213		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
   2214		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
   2215		if (adev->enable_virtual_display)
   2216			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
   2217#if defined(CONFIG_DRM_AMD_DC)
   2218		else if (amdgpu_device_has_dc_support(adev))
   2219			amdgpu_device_ip_block_add(adev, &dm_ip_block);
   2220#endif
   2221		else
   2222			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
   2223		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
   2224		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
   2225#if defined(CONFIG_DRM_AMD_ACP)
   2226		amdgpu_device_ip_block_add(adev, &acp_ip_block);
   2227#endif
   2228		break;
   2229	default:
   2230		/* FIXME: not supported yet */
   2231		return -EINVAL;
   2232	}
   2233
   2234	return 0;
   2235}
   2236
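/*
 * VI uses the fixed legacy doorbell layout: each index below selects the
 * slot in the doorbell BAR that is written to kick the corresponding ring
 * (KIQ, the compute MEC rings, GFX, the two SDMA engines and the IH).
 */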
   2237void legacy_doorbell_index_init(struct amdgpu_device *adev)
   2238{
   2239	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
   2240	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
   2241	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
   2242	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
   2243	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
   2244	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
   2245	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
   2246	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
   2247	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
   2248	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
   2249	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
   2250	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
   2251	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
   2252	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
   2253}