cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vcn_v2_0.c (67064B)


      1/*
      2 * Copyright 2018 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include <linux/firmware.h>
     25#include <drm/drm_drv.h>
     26
     27#include "amdgpu.h"
     28#include "amdgpu_vcn.h"
     29#include "soc15.h"
     30#include "soc15d.h"
     31#include "amdgpu_pm.h"
     32#include "amdgpu_psp.h"
     33#include "mmsch_v2_0.h"
     34#include "vcn_v2_0.h"
     35
     36#include "vcn/vcn_2_0_0_offset.h"
     37#include "vcn/vcn_2_0_0_sh_mask.h"
     38#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
     39
     40#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
     41#define VCN1_VID_SOC_ADDRESS_3_0				0x48200
     42
     43#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
     44#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
     45#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
     46#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
     47#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
     48#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
     49#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d
     50
     51#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
     52#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
     53#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
     54#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2
     55
     56static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
     57static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
     58static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
     59static int vcn_v2_0_set_powergating_state(void *handle,
     60				enum amd_powergating_state state);
     61static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
     62				int inst_idx, struct dpg_pause_state *new_state);
     63static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
     64/**
     65 * vcn_v2_0_early_init - set function pointers
     66 *
     67 * @handle: amdgpu_device pointer
     68 *
     69 * Set ring and irq function pointers
     70 */
     71static int vcn_v2_0_early_init(void *handle)
     72{
     73	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     74
     75	if (amdgpu_sriov_vf(adev))
     76		adev->vcn.num_enc_rings = 1;
     77	else
     78		adev->vcn.num_enc_rings = 2;
     79
     80	vcn_v2_0_set_dec_ring_funcs(adev);
     81	vcn_v2_0_set_enc_ring_funcs(adev);
     82	vcn_v2_0_set_irq_funcs(adev);
     83
     84	return 0;
     85}
     86
     87/**
     88 * vcn_v2_0_sw_init - sw init for VCN block
     89 *
     90 * @handle: amdgpu_device pointer
     91 *
     92 * Load firmware and sw initialization
     93 */
     94static int vcn_v2_0_sw_init(void *handle)
     95{
     96	struct amdgpu_ring *ring;
     97	int i, r;
     98	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     99	volatile struct amdgpu_fw_shared *fw_shared;
    100
    101	/* VCN DEC TRAP */
    102	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
    103			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
    104			      &adev->vcn.inst->irq);
    105	if (r)
    106		return r;
    107
    108	/* VCN ENC TRAP */
    109	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
    110		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
    111				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
    112				      &adev->vcn.inst->irq);
    113		if (r)
    114			return r;
    115	}
    116
    117	r = amdgpu_vcn_sw_init(adev);
    118	if (r)
    119		return r;
    120
    121	amdgpu_vcn_setup_ucode(adev);
    122
    123	r = amdgpu_vcn_resume(adev);
    124	if (r)
    125		return r;
    126
    127	ring = &adev->vcn.inst->ring_dec;
    128
    129	ring->use_doorbell = true;
    130	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
    131
    132	sprintf(ring->name, "vcn_dec");
    133	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
    134			     AMDGPU_RING_PRIO_DEFAULT, NULL);
    135	if (r)
    136		return r;
    137
    138	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
    139	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
    140	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
    141	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
    142	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
    143	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
    144
    145	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
    146	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
    147	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
    148	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
    149	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
    150	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
    151	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
    152	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
    153	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
    154	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
    155
    156	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
    157		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
    158
    159		ring = &adev->vcn.inst->ring_enc[i];
    160		ring->use_doorbell = true;
    161		if (!amdgpu_sriov_vf(adev))
    162			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
    163		else
    164			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
    165		sprintf(ring->name, "vcn_enc%d", i);
    166		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
    167				     hw_prio, NULL);
    168		if (r)
    169			return r;
    170	}
    171
    172	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
    173
    174	r = amdgpu_virt_alloc_mm_table(adev);
    175	if (r)
    176		return r;
    177
    178	fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
    179	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
    180
    181	if (amdgpu_vcnfw_log)
    182		amdgpu_vcn_fwlog_init(adev->vcn.inst);
    183
    184	return 0;
    185}
    186
    187/**
    188 * vcn_v2_0_sw_fini - sw fini for VCN block
    189 *
    190 * @handle: amdgpu_device pointer
    191 *
    192 * VCN suspend and free up sw allocation
    193 */
    194static int vcn_v2_0_sw_fini(void *handle)
    195{
    196	int r, idx;
    197	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    198	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
    199
    200	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
    201		fw_shared->present_flag_0 = 0;
    202		drm_dev_exit(idx);
    203	}
    204
    205	amdgpu_virt_free_mm_table(adev);
    206
    207	r = amdgpu_vcn_suspend(adev);
    208	if (r)
    209		return r;
    210
    211	r = amdgpu_vcn_sw_fini(adev);
    212
    213	return r;
    214}
    215
    216/**
    217 * vcn_v2_0_hw_init - start and test VCN block
    218 *
    219 * @handle: amdgpu_device pointer
    220 *
    221 * Initialize the hardware, boot up the VCPU and do some testing
    222 */
    223static int vcn_v2_0_hw_init(void *handle)
    224{
    225	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    226	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
    227	int i, r;
    228
    229	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
    230					     ring->doorbell_index, 0);
    231
    232	if (amdgpu_sriov_vf(adev))
    233		vcn_v2_0_start_sriov(adev);
    234
    235	r = amdgpu_ring_test_helper(ring);
    236	if (r)
    237		goto done;
    238
     239	/* disable VCN decode for SRIOV */
    240	if (amdgpu_sriov_vf(adev))
    241		ring->sched.ready = false;
    242
    243	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
    244		ring = &adev->vcn.inst->ring_enc[i];
    245		r = amdgpu_ring_test_helper(ring);
    246		if (r)
    247			goto done;
    248	}
    249
    250done:
    251	if (!r)
     252		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
     253			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
    254
    255	return r;
    256}
    257
    258/**
    259 * vcn_v2_0_hw_fini - stop the hardware block
    260 *
    261 * @handle: amdgpu_device pointer
    262 *
     263 * Stop the VCN block and mark the ring as no longer ready
    264 */
    265static int vcn_v2_0_hw_fini(void *handle)
    266{
    267	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    268
    269	cancel_delayed_work_sync(&adev->vcn.idle_work);
    270
    271	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
    272	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
    273	      RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
    274		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
    275
    276	return 0;
    277}
    278
    279/**
    280 * vcn_v2_0_suspend - suspend VCN block
    281 *
    282 * @handle: amdgpu_device pointer
    283 *
    284 * HW fini and suspend VCN block
    285 */
    286static int vcn_v2_0_suspend(void *handle)
    287{
    288	int r;
    289	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    290
    291	r = vcn_v2_0_hw_fini(adev);
    292	if (r)
    293		return r;
    294
    295	r = amdgpu_vcn_suspend(adev);
    296
    297	return r;
    298}
    299
    300/**
    301 * vcn_v2_0_resume - resume VCN block
    302 *
    303 * @handle: amdgpu_device pointer
    304 *
    305 * Resume firmware and hw init VCN block
    306 */
    307static int vcn_v2_0_resume(void *handle)
    308{
    309	int r;
    310	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    311
    312	r = amdgpu_vcn_resume(adev);
    313	if (r)
    314		return r;
    315
    316	r = vcn_v2_0_hw_init(adev);
    317
    318	return r;
    319}
    320
    321/**
    322 * vcn_v2_0_mc_resume - memory controller programming
    323 *
    324 * @adev: amdgpu_device pointer
    325 *
     326 * Let the VCN memory controller know its offsets
    327 */
    328static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
    329{
    330	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
    331	uint32_t offset;
    332
    333	if (amdgpu_sriov_vf(adev))
    334		return;
    335
    336	/* cache window 0: fw */
    337	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    338		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
    339			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
    340		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
    341			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
    342		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
    343		offset = 0;
    344	} else {
    345		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
    346			lower_32_bits(adev->vcn.inst->gpu_addr));
    347		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
    348			upper_32_bits(adev->vcn.inst->gpu_addr));
    349		offset = size;
    350		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
    351			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
    352	}
    353
    354	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
    355
    356	/* cache window 1: stack */
    357	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
    358		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
    359	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
    360		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
    361	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
    362	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
    363
    364	/* cache window 2: context */
    365	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
    366		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
    367	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
    368		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
    369	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
    370	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
    371
    372	/* non-cache window */
    373	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
    374		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
    375	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
    376		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
    377	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
    378	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
    379		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
    380
    381	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
    382}
    383
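/**
 * vcn_v2_0_mc_resume_dpg_mode - memory controller programming for DPG mode
 *
 * @adev: amdgpu_device pointer
 * @indirect: when true, queue the writes into the DPG scratch SRAM instead
 *	      of writing the registers directly
 *
 * Program the firmware, stack, context and non-cache memory windows and the
 * global tiling register through the DPG register write path.
 */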
    384static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
    385{
    386	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
    387	uint32_t offset;
    388
    389	/* cache window 0: fw */
    390	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    391		if (!indirect) {
    392			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    393				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
    394				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
    395			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    396				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
    397				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
    398			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    399				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
    400		} else {
    401			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    402				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
    403			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    404				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
    405			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    406				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
    407		}
    408		offset = 0;
    409	} else {
    410		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    411			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
    412			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
    413		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    414			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
    415			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
    416		offset = size;
    417		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    418			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
    419			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
    420	}
    421
    422	if (!indirect)
    423		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    424			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
    425	else
    426		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    427			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
    428
    429	/* cache window 1: stack */
    430	if (!indirect) {
    431		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    432			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
    433			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
    434		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    435			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
    436			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
    437		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    438			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
    439	} else {
    440		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    441			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
    442		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    443			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
    444		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    445			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
    446	}
    447	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    448		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
    449
    450	/* cache window 2: context */
    451	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    452		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
    453		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
    454	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    455		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
    456		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
    457	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    458		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
    459	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    460		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
    461
    462	/* non-cache window */
    463	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    464		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
    465		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
    466	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    467		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
    468		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
    469	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    470		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
    471	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    472		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
    473		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
    474
    475	/* VCN global tiling registers */
    476	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    477		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
    478}
    479
    480/**
    481 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
    482 *
    483 * @adev: amdgpu_device pointer
    484 *
    485 * Disable clock gating for VCN block
    486 */
    487static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
    488{
    489	uint32_t data;
    490
    491	if (amdgpu_sriov_vf(adev))
    492		return;
    493
     494	/* disable UVD CGC */
    495	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    496	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
    497		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    498	else
    499		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
    500	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
    501	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
    502	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
    503
    504	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
    505	data &= ~(UVD_CGC_GATE__SYS_MASK
    506		| UVD_CGC_GATE__UDEC_MASK
    507		| UVD_CGC_GATE__MPEG2_MASK
    508		| UVD_CGC_GATE__REGS_MASK
    509		| UVD_CGC_GATE__RBC_MASK
    510		| UVD_CGC_GATE__LMI_MC_MASK
    511		| UVD_CGC_GATE__LMI_UMC_MASK
    512		| UVD_CGC_GATE__IDCT_MASK
    513		| UVD_CGC_GATE__MPRD_MASK
    514		| UVD_CGC_GATE__MPC_MASK
    515		| UVD_CGC_GATE__LBSI_MASK
    516		| UVD_CGC_GATE__LRBBM_MASK
    517		| UVD_CGC_GATE__UDEC_RE_MASK
    518		| UVD_CGC_GATE__UDEC_CM_MASK
    519		| UVD_CGC_GATE__UDEC_IT_MASK
    520		| UVD_CGC_GATE__UDEC_DB_MASK
    521		| UVD_CGC_GATE__UDEC_MP_MASK
    522		| UVD_CGC_GATE__WCB_MASK
    523		| UVD_CGC_GATE__VCPU_MASK
    524		| UVD_CGC_GATE__SCPU_MASK);
    525	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
    526
    527	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    528	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
    529		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
    530		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
    531		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
    532		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
    533		| UVD_CGC_CTRL__SYS_MODE_MASK
    534		| UVD_CGC_CTRL__UDEC_MODE_MASK
    535		| UVD_CGC_CTRL__MPEG2_MODE_MASK
    536		| UVD_CGC_CTRL__REGS_MODE_MASK
    537		| UVD_CGC_CTRL__RBC_MODE_MASK
    538		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
    539		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
    540		| UVD_CGC_CTRL__IDCT_MODE_MASK
    541		| UVD_CGC_CTRL__MPRD_MODE_MASK
    542		| UVD_CGC_CTRL__MPC_MODE_MASK
    543		| UVD_CGC_CTRL__LBSI_MODE_MASK
    544		| UVD_CGC_CTRL__LRBBM_MODE_MASK
    545		| UVD_CGC_CTRL__WCB_MODE_MASK
    546		| UVD_CGC_CTRL__VCPU_MODE_MASK
    547		| UVD_CGC_CTRL__SCPU_MODE_MASK);
    548	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
    549
    550	/* turn on */
    551	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
    552	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
    553		| UVD_SUVD_CGC_GATE__SIT_MASK
    554		| UVD_SUVD_CGC_GATE__SMP_MASK
    555		| UVD_SUVD_CGC_GATE__SCM_MASK
    556		| UVD_SUVD_CGC_GATE__SDB_MASK
    557		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
    558		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
    559		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
    560		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
    561		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
    562		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
    563		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
    564		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
    565		| UVD_SUVD_CGC_GATE__SCLR_MASK
    566		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
    567		| UVD_SUVD_CGC_GATE__ENT_MASK
    568		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
    569		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
    570		| UVD_SUVD_CGC_GATE__SITE_MASK
    571		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
    572		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
    573		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
    574		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
    575		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
    576	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
    577
    578	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
    579	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
    580		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
    581		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
    582		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
    583		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
    584		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
    585		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
    586		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
    587		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
    588		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
    589	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
    590}
    591
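/**
 * vcn_v2_0_clock_gating_dpg_mode - software clock gating setup for DPG mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: SRAM select passed to the DPG register write macros
 * @indirect: when true, queue the writes into the DPG scratch SRAM
 *
 * Enable software-controlled clock gating in UVD_CGC_CTRL, clear the CGC
 * gate bits and turn on SUVD clock gating through the DPG write path.
 */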
    592static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
    593		uint8_t sram_sel, uint8_t indirect)
    594{
    595	uint32_t reg_data = 0;
    596
    597	/* enable sw clock gating control */
    598	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
    599		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    600	else
    601		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    602	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
    603	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
    604	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
    605		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
    606		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
    607		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
    608		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
    609		 UVD_CGC_CTRL__SYS_MODE_MASK |
    610		 UVD_CGC_CTRL__UDEC_MODE_MASK |
    611		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
    612		 UVD_CGC_CTRL__REGS_MODE_MASK |
    613		 UVD_CGC_CTRL__RBC_MODE_MASK |
    614		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
    615		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
    616		 UVD_CGC_CTRL__IDCT_MODE_MASK |
    617		 UVD_CGC_CTRL__MPRD_MODE_MASK |
    618		 UVD_CGC_CTRL__MPC_MODE_MASK |
    619		 UVD_CGC_CTRL__LBSI_MODE_MASK |
    620		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
    621		 UVD_CGC_CTRL__WCB_MODE_MASK |
    622		 UVD_CGC_CTRL__VCPU_MODE_MASK |
    623		 UVD_CGC_CTRL__SCPU_MODE_MASK);
    624	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    625		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
    626
    627	/* turn off clock gating */
    628	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    629		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
    630
    631	/* turn on SUVD clock gating */
    632	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    633		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
    634
    635	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
    636	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    637		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
    638}
    639
    640/**
    641 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
    642 *
    643 * @adev: amdgpu_device pointer
    644 *
    645 * Enable clock gating for VCN block
    646 */
    647static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
    648{
    649	uint32_t data = 0;
    650
    651	if (amdgpu_sriov_vf(adev))
    652		return;
    653
    654	/* enable UVD CGC */
    655	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    656	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
    657		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    658	else
    659		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    660	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
    661	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
    662	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
    663
    664	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    665	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
    666		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
    667		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
    668		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
    669		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
    670		| UVD_CGC_CTRL__SYS_MODE_MASK
    671		| UVD_CGC_CTRL__UDEC_MODE_MASK
    672		| UVD_CGC_CTRL__MPEG2_MODE_MASK
    673		| UVD_CGC_CTRL__REGS_MODE_MASK
    674		| UVD_CGC_CTRL__RBC_MODE_MASK
    675		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
    676		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
    677		| UVD_CGC_CTRL__IDCT_MODE_MASK
    678		| UVD_CGC_CTRL__MPRD_MODE_MASK
    679		| UVD_CGC_CTRL__MPC_MODE_MASK
    680		| UVD_CGC_CTRL__LBSI_MODE_MASK
    681		| UVD_CGC_CTRL__LRBBM_MODE_MASK
    682		| UVD_CGC_CTRL__WCB_MODE_MASK
    683		| UVD_CGC_CTRL__VCPU_MODE_MASK
    684		| UVD_CGC_CTRL__SCPU_MODE_MASK);
    685	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
    686
    687	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
    688	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
    689		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
    690		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
    691		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
    692		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
    693		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
    694		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
    695		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
    696		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
    697		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
    698	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
    699}
    700
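/**
 * vcn_v2_0_disable_static_power_gating - power up the UVD tiles
 *
 * @adev: amdgpu_device pointer
 *
 * Program UVD_PGFSM_CONFIG to power the UVD tiles back on, wait for
 * UVD_PGFSM_STATUS to confirm and update UVD_POWER_STATUS accordingly.
 * Does nothing under SRIOV.
 */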
    701static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
    702{
    703	uint32_t data = 0;
    704
    705	if (amdgpu_sriov_vf(adev))
    706		return;
    707
    708	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
    709		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
    710			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
    711			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
    712			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
    713			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
    714			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
    715			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
    716			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
    717			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
    718			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
    719
    720		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
    721		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
    722			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
    723	} else {
    724		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
    725			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
    726			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
    727			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
    728			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
    729			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
    730			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
    731			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
    732			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
    733			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
    734		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
    735		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0,  0xFFFFF);
    736	}
    737
    738	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
    739	 * UVDU_PWR_STATUS are 0 (power on) */
    740
    741	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
    742	data &= ~0x103;
    743	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
    744		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
    745			UVD_POWER_STATUS__UVD_PG_EN_MASK;
    746
    747	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
    748}
    749
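/**
 * vcn_v2_0_enable_static_power_gating - power down the UVD tiles
 *
 * @adev: amdgpu_device pointer
 *
 * When VCN power gating is supported, flag the tiles-off state in
 * UVD_POWER_STATUS, program UVD_PGFSM_CONFIG to power the tiles down and
 * wait for UVD_PGFSM_STATUS to confirm.  Does nothing under SRIOV.
 */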
    750static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
    751{
    752	uint32_t data = 0;
    753
    754	if (amdgpu_sriov_vf(adev))
    755		return;
    756
    757	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
    758		/* Before power off, this indicator has to be turned on */
    759		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
    760		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
    761		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
    762		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
    763
    764
    765		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
    766			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
    767			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
    768			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
    769			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
    770			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
    771			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
    772			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
    773			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
    774			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
    775
    776		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
    777
    778		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
    779			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
    780			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
    781			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
    782			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
    783			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
    784			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
    785			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
    786			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
    787			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
    788		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
    789	}
    790}
    791
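/**
 * vcn_v2_0_start_dpg_mode - start VCN in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 * @indirect: when true, program the boot sequence through the DPG scratch SRAM
 *
 * Enable dynamic power gating, program clock gating, the VCPU, LMI, MPC and
 * memory controller registers, then bring up the decode ring buffer and its
 * read/write pointers.
 */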
    792static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
    793{
    794	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
    795	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
    796	uint32_t rb_bufsz, tmp;
    797
    798	vcn_v2_0_enable_static_power_gating(adev);
    799
    800	/* enable dynamic power gating mode */
    801	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
    802	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
    803	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
    804	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
    805
    806	if (indirect)
    807		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
    808
    809	/* enable clock gating */
    810	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
    811
    812	/* enable VCPU clock */
    813	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
    814	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
    815	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
    816	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    817		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
    818
     819	/* disable master interrupt */
    820	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    821		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
    822
    823	/* setup mmUVD_LMI_CTRL */
    824	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
    825		UVD_LMI_CTRL__REQ_MODE_MASK |
    826		UVD_LMI_CTRL__CRC_RESET_MASK |
    827		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
    828		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
    829		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
    830		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
    831		0x00100000L);
    832	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    833		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
    834
    835	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    836		UVD, 0, mmUVD_MPC_CNTL),
    837		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
    838
    839	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    840		UVD, 0, mmUVD_MPC_SET_MUXA0),
    841		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
    842		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
    843		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
    844		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
    845
    846	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    847		UVD, 0, mmUVD_MPC_SET_MUXB0),
    848		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
    849		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
    850		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
    851		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
    852
    853	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    854		UVD, 0, mmUVD_MPC_SET_MUX),
    855		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
    856		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
    857		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
    858
    859	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
    860
    861	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    862		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
    863	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    864		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
    865
    866	/* release VCPU reset to boot */
    867	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    868		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
    869
    870	/* enable LMI MC and UMC channels */
    871	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    872		UVD, 0, mmUVD_LMI_CTRL2),
    873		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
    874
    875	/* enable master interrupt */
    876	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
    877		UVD, 0, mmUVD_MASTINT_EN),
    878		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
    879
    880	if (indirect)
    881		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
    882				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
    883					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
    884
    885	/* force RBC into idle state */
    886	rb_bufsz = order_base_2(ring->ring_size);
    887	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
    888	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
    889	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
    890	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
    891	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
    892	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
    893
    894	/* Stall DPG before WPTR/RPTR reset */
    895	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
    896		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
    897		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
    898	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
    899
    900	/* set the write pointer delay */
    901	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
    902
    903	/* set the wb address */
    904	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
    905		(upper_32_bits(ring->gpu_addr) >> 2));
    906
    907	/* program the RB_BASE for ring buffer */
    908	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
    909		lower_32_bits(ring->gpu_addr));
    910	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
    911		upper_32_bits(ring->gpu_addr));
    912
    913	/* Initialize the ring buffer's read and write pointers */
    914	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
    915
    916	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
    917
    918	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
    919	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
    920		lower_32_bits(ring->wptr));
    921
    922	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
    923	/* Unstall DPG */
    924	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
    925		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
    926	return 0;
    927}
    928
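/**
 * vcn_v2_0_start - start the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Power up the block, disable clock gating, program the LMI, MPC and memory
 * controller registers, boot the VCPU and set up the decode and encode ring
 * buffers.  Defers to vcn_v2_0_start_dpg_mode() when dynamic power gating is
 * enabled.
 */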
    929static int vcn_v2_0_start(struct amdgpu_device *adev)
    930{
    931	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
    932	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
    933	uint32_t rb_bufsz, tmp;
    934	uint32_t lmi_swap_cntl;
    935	int i, j, r;
    936
    937	if (adev->pm.dpm_enabled)
    938		amdgpu_dpm_enable_uvd(adev, true);
    939
    940	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
    941		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
    942
    943	vcn_v2_0_disable_static_power_gating(adev);
    944
    945	/* set uvd status busy */
    946	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
    947	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
    948
     949	/* SW clock gating */
    950	vcn_v2_0_disable_clock_gating(adev);
    951
    952	/* enable VCPU clock */
    953	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
    954		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
    955
    956	/* disable master interrupt */
    957	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
    958		~UVD_MASTINT_EN__VCPU_EN_MASK);
    959
    960	/* setup mmUVD_LMI_CTRL */
    961	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
    962	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
    963		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK	|
    964		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
    965		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
    966		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
    967
    968	/* setup mmUVD_MPC_CNTL */
    969	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
    970	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
    971	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
    972	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);
    973
    974	/* setup UVD_MPC_SET_MUXA0 */
    975	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
    976		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
    977		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
    978		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
    979		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
    980
    981	/* setup UVD_MPC_SET_MUXB0 */
    982	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
    983		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
    984		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
    985		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
    986		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
    987
    988	/* setup mmUVD_MPC_SET_MUX */
    989	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
    990		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
    991		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
    992		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
    993
    994	vcn_v2_0_mc_resume(adev);
    995
    996	/* release VCPU reset to boot */
    997	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
    998		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
    999
   1000	/* enable LMI MC and UMC channels */
   1001	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
   1002		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
   1003
   1004	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
   1005	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
   1006	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
   1007	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);
   1008
   1009	/* disable byte swapping */
   1010	lmi_swap_cntl = 0;
   1011#ifdef __BIG_ENDIAN
   1012	/* swap (8 in 32) RB and IB */
   1013	lmi_swap_cntl = 0xa;
   1014#endif
   1015	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
   1016
   1017	for (i = 0; i < 10; ++i) {
   1018		uint32_t status;
   1019
   1020		for (j = 0; j < 100; ++j) {
   1021			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
   1022			if (status & 2)
   1023				break;
   1024			mdelay(10);
   1025		}
   1026		r = 0;
   1027		if (status & 2)
   1028			break;
   1029
   1030		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
   1031		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
   1032			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
   1033			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1034		mdelay(10);
   1035		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
   1036			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1037		mdelay(10);
   1038		r = -1;
   1039	}
   1040
   1041	if (r) {
   1042		DRM_ERROR("VCN decode not responding, giving up!!!\n");
   1043		return r;
   1044	}
   1045
   1046	/* enable master interrupt */
   1047	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
   1048		UVD_MASTINT_EN__VCPU_EN_MASK,
   1049		~UVD_MASTINT_EN__VCPU_EN_MASK);
   1050
   1051	/* clear the busy bit of VCN_STATUS */
   1052	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
   1053		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
   1054
   1055	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);
   1056
   1057	/* force RBC into idle state */
   1058	rb_bufsz = order_base_2(ring->ring_size);
   1059	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
   1060	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
   1061	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
   1062	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
   1063	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
   1064	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
   1065
   1066	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
   1067	/* program the RB_BASE for ring buffer */
   1068	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
   1069		lower_32_bits(ring->gpu_addr));
   1070	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
   1071		upper_32_bits(ring->gpu_addr));
   1072
   1073	/* Initialize the ring buffer's read and write pointers */
   1074	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
   1075
   1076	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
   1077	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
   1078			lower_32_bits(ring->wptr));
   1079	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
   1080
   1081	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
   1082	ring = &adev->vcn.inst->ring_enc[0];
   1083	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
   1084	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
   1085	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
   1086	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
   1087	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
   1088	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
   1089
   1090	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
   1091	ring = &adev->vcn.inst->ring_enc[1];
   1092	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
   1093	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
   1094	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
   1095	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
   1096	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
   1097	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
   1098
   1099	return 0;
   1100}
   1101
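/**
 * vcn_v2_0_stop_dpg_mode - stop VCN running in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 *
 * Unpause DPG, wait for the power status and the ring read/write pointers to
 * settle, then clear the dynamic power gating mode bit in UVD_POWER_STATUS.
 */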
   1102static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
   1103{
   1104	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
   1105	uint32_t tmp;
   1106
   1107	vcn_v2_0_pause_dpg_mode(adev, 0, &state);
   1108	/* Wait for power status to be 1 */
   1109	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
   1110		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
   1111
   1112	/* wait for read ptr to be equal to write ptr */
   1113	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
   1114	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
   1115
   1116	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
   1117	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
   1118
   1119	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
   1120	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
   1121
   1122	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
   1123		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
   1124
   1125	/* disable dynamic power gating mode */
   1126	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
   1127			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
   1128
   1129	return 0;
   1130}
   1131
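/**
 * vcn_v2_0_stop - stop the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the engine to become idle, stall the UMC channel, reset the LMI
 * and VCPU, re-enable clock and static power gating and finally request
 * power-off through DPM.  Uses vcn_v2_0_stop_dpg_mode() when dynamic power
 * gating is enabled.
 */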
   1132static int vcn_v2_0_stop(struct amdgpu_device *adev)
   1133{
   1134	uint32_t tmp;
   1135	int r;
   1136
   1137	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
   1138		r = vcn_v2_0_stop_dpg_mode(adev);
   1139		if (r)
   1140			return r;
   1141		goto power_off;
   1142	}
   1143
   1144	/* wait for uvd idle */
   1145	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
   1146	if (r)
   1147		return r;
   1148
   1149	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
   1150		UVD_LMI_STATUS__READ_CLEAN_MASK |
   1151		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
   1152		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
   1153	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
   1154	if (r)
   1155		return r;
   1156
   1157	/* stall UMC channel */
   1158	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
   1159	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
   1160	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);
   1161
   1162	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
   1163		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
   1164	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
   1165	if (r)
   1166		return r;
   1167
   1168	/* disable VCPU clock */
   1169	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
   1170		~(UVD_VCPU_CNTL__CLK_EN_MASK));
   1171
   1172	/* reset LMI UMC */
   1173	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
   1174		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
   1175		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
   1176
   1177	/* reset LMI */
   1178	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
   1179		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
   1180		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
   1181
   1182	/* reset VCPU */
   1183	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
   1184		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
   1185		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1186
   1187	/* clear status */
   1188	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
   1189
   1190	vcn_v2_0_enable_clock_gating(adev);
   1191	vcn_v2_0_enable_static_power_gating(adev);
   1192
   1193power_off:
   1194	if (adev->pm.dpm_enabled)
   1195		amdgpu_dpm_enable_uvd(adev, false);
   1196
   1197	return 0;
   1198}
   1199
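/**
 * vcn_v2_0_pause_dpg_mode - pause or unpause the DPG firmware
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @new_state: requested pause state
 *
 * On a pause request, ask the firmware to pause DPG, wait for the ACK and
 * restore the encode and decode ring registers; on unpause, simply clear the
 * pause request bit.
 */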
   1200static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
   1201				int inst_idx, struct dpg_pause_state *new_state)
   1202{
   1203	struct amdgpu_ring *ring;
   1204	uint32_t reg_data = 0;
   1205	int ret_code;
   1206
   1207	/* pause/unpause if state is changed */
   1208	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
   1209		DRM_DEBUG("dpg pause state changed %d -> %d",
   1210			adev->vcn.inst[inst_idx].pause_state.fw_based,	new_state->fw_based);
   1211		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
   1212			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
   1213
   1214		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
   1215			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
   1216				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
   1217
   1218			if (!ret_code) {
   1219				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
   1220				/* pause DPG */
   1221				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
   1222				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
   1223
   1224				/* wait for ACK */
   1225				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
   1226					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
   1227					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
   1228
   1229				/* Stall DPG before WPTR/RPTR reset */
   1230				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
   1231					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
   1232					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
   1233				/* Restore */
   1234				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
   1235				ring = &adev->vcn.inst->ring_enc[0];
   1236				ring->wptr = 0;
   1237				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
   1238				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
   1239				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
   1240				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
   1241				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
   1242				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
   1243
   1244				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
   1245				ring = &adev->vcn.inst->ring_enc[1];
   1246				ring->wptr = 0;
   1247				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
   1248				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
   1249				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
   1250				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
   1251				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
   1252				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
   1253
   1254				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
   1255				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
   1256					   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
   1257				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
   1258				/* Unstall DPG */
   1259				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
   1260					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
   1261
   1262				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
   1263					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
   1264					   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
   1265			}
   1266		} else {
   1267			/* unpause dpg, no need to wait */
   1268			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
   1269			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
   1270		}
   1271		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
   1272	}
   1273
   1274	return 0;
   1275}
   1276
   1277static bool vcn_v2_0_is_idle(void *handle)
   1278{
   1279	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1280
   1281	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
   1282}
   1283
   1284static int vcn_v2_0_wait_for_idle(void *handle)
   1285{
   1286	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1287	int ret;
   1288
   1289	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
   1290		UVD_STATUS__IDLE);
   1291
   1292	return ret;
   1293}
   1294
   1295static int vcn_v2_0_set_clockgating_state(void *handle,
   1296					  enum amd_clockgating_state state)
   1297{
   1298	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1299	bool enable = (state == AMD_CG_STATE_GATE);
   1300
   1301	if (amdgpu_sriov_vf(adev))
   1302		return 0;
   1303
   1304	if (enable) {
   1305		/* wait for STATUS to clear */
   1306		if (!vcn_v2_0_is_idle(handle))
   1307			return -EBUSY;
   1308		vcn_v2_0_enable_clock_gating(adev);
   1309	} else {
    1310		/* disable HW gating and enable SW gating */
   1311		vcn_v2_0_disable_clock_gating(adev);
   1312	}
   1313	return 0;
   1314}
   1315
   1316/**
   1317 * vcn_v2_0_dec_ring_get_rptr - get read pointer
   1318 *
   1319 * @ring: amdgpu_ring pointer
   1320 *
   1321 * Returns the current hardware read pointer
   1322 */
   1323static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
   1324{
   1325	struct amdgpu_device *adev = ring->adev;
   1326
   1327	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
   1328}
   1329
   1330/**
   1331 * vcn_v2_0_dec_ring_get_wptr - get write pointer
   1332 *
   1333 * @ring: amdgpu_ring pointer
   1334 *
   1335 * Returns the current hardware write pointer
   1336 */
   1337static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
   1338{
   1339	struct amdgpu_device *adev = ring->adev;
   1340
   1341	if (ring->use_doorbell)
   1342		return *ring->wptr_cpu_addr;
   1343	else
   1344		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
   1345}
   1346
   1347/**
   1348 * vcn_v2_0_dec_ring_set_wptr - set write pointer
   1349 *
   1350 * @ring: amdgpu_ring pointer
   1351 *
   1352 * Commits the write pointer to the hardware
   1353 */
   1354static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
   1355{
   1356	struct amdgpu_device *adev = ring->adev;
   1357
   1358	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
   1359		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
   1360			lower_32_bits(ring->wptr) | 0x80000000);
   1361
   1362	if (ring->use_doorbell) {
   1363		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
   1364		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
   1365	} else {
   1366		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
   1367	}
   1368}
   1369
   1370/**
   1371 * vcn_v2_0_dec_ring_insert_start - insert a start command
   1372 *
   1373 * @ring: amdgpu_ring pointer
   1374 *
   1375 * Write a start command to the ring.
   1376 */
   1377void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
   1378{
   1379	struct amdgpu_device *adev = ring->adev;
   1380
   1381	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
   1382	amdgpu_ring_write(ring, 0);
   1383	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1384	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
   1385}
   1386
   1387/**
    1388 * vcn_v2_0_dec_ring_insert_end - insert an end command
   1389 *
   1390 * @ring: amdgpu_ring pointer
   1391 *
    1392 * Write an end command to the ring.
   1393 */
   1394void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
   1395{
   1396	struct amdgpu_device *adev = ring->adev;
   1397
   1398	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1399	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
   1400}
   1401
   1402/**
   1403 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
   1404 *
   1405 * @ring: amdgpu_ring pointer
   1406 * @count: the number of NOP packets to insert
   1407 *
   1408 * Write a nop command to the ring.
   1409 */
   1410void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
   1411{
   1412	struct amdgpu_device *adev = ring->adev;
   1413	int i;
   1414
   1415	WARN_ON(ring->wptr % 2 || count % 2);
   1416
   1417	for (i = 0; i < count / 2; i++) {
   1418		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
   1419		amdgpu_ring_write(ring, 0);
   1420	}
   1421}
   1422
   1423/**
    1424 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
   1425 *
   1426 * @ring: amdgpu_ring pointer
   1427 * @addr: address
   1428 * @seq: sequence number
   1429 * @flags: fence related flags
   1430 *
   1431 * Write a fence and a trap command to the ring.
   1432 */
   1433void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
   1434				unsigned flags)
   1435{
   1436	struct amdgpu_device *adev = ring->adev;
   1437
   1438	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
   1439	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
   1440	amdgpu_ring_write(ring, seq);
   1441
   1442	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
   1443	amdgpu_ring_write(ring, addr & 0xffffffff);
   1444
   1445	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
   1446	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
   1447
   1448	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1449	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
   1450
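	/* follow the fence with a TRAP command (operands cleared) so the
	 * engine raises an interrupt once the fence value has been written
	 */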
   1451	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
   1452	amdgpu_ring_write(ring, 0);
   1453
   1454	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
   1455	amdgpu_ring_write(ring, 0);
   1456
   1457	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1458
   1459	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
   1460}
   1461
   1462/**
   1463 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
   1464 *
   1465 * @ring: amdgpu_ring pointer
   1466 * @job: job to retrieve vmid from
   1467 * @ib: indirect buffer to execute
   1468 * @flags: unused
   1469 *
    1470 * Write ring commands to execute the indirect buffer.
   1471 */
   1472void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
   1473			       struct amdgpu_job *job,
   1474			       struct amdgpu_ib *ib,
   1475			       uint32_t flags)
   1476{
   1477	struct amdgpu_device *adev = ring->adev;
   1478	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
   1479
   1480	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
   1481	amdgpu_ring_write(ring, vmid);
   1482
   1483	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_low, 0));
   1484	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
   1485	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_high, 0));
   1486	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
   1487	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_size, 0));
   1488	amdgpu_ring_write(ring, ib->length_dw);
   1489}
   1490
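/**
 * vcn_v2_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: value to wait for
 * @mask: mask applied to the register value
 *
 * Emit a REG_READ_COND_WAIT command that stalls the ring until the value
 * read from @reg, masked with @mask, equals @val.
 */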
   1491void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
   1492				uint32_t val, uint32_t mask)
   1493{
   1494	struct amdgpu_device *adev = ring->adev;
   1495
   1496	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
   1497	amdgpu_ring_write(ring, reg << 2);
   1498
   1499	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
   1500	amdgpu_ring_write(ring, val);
   1501
   1502	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
   1503	amdgpu_ring_write(ring, mask);
   1504
   1505	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1506
   1507	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
   1508}
   1509
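/**
 * vcn_v2_0_dec_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory base address
 *
 * Emit the GMC TLB flush packets for @vmid and then wait until the hub's
 * page table base register for that VMID reflects @pd_addr.
 */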
   1510void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
   1511				unsigned vmid, uint64_t pd_addr)
   1512{
   1513	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
   1514	uint32_t data0, data1, mask;
   1515
   1516	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
   1517
   1518	/* wait for register write */
   1519	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
   1520	data1 = lower_32_bits(pd_addr);
   1521	mask = 0xffffffff;
   1522	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
   1523}
   1524
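/**
 * vcn_v2_0_dec_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a WRITE_REG command that writes @val to @reg from the command stream.
 */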
   1525void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
   1526				uint32_t reg, uint32_t val)
   1527{
   1528	struct amdgpu_device *adev = ring->adev;
   1529
   1530	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
   1531	amdgpu_ring_write(ring, reg << 2);
   1532
   1533	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
   1534	amdgpu_ring_write(ring, val);
   1535
   1536	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1537
   1538	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
   1539}
   1540
   1541/**
   1542 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
   1543 *
   1544 * @ring: amdgpu_ring pointer
   1545 *
   1546 * Returns the current hardware enc read pointer
   1547 */
   1548static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
   1549{
   1550	struct amdgpu_device *adev = ring->adev;
   1551
   1552	if (ring == &adev->vcn.inst->ring_enc[0])
   1553		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
   1554	else
   1555		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
   1556}
   1557
    1558/**
   1559 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
   1560 *
   1561 * @ring: amdgpu_ring pointer
   1562 *
   1563 * Returns the current hardware enc write pointer
   1564 */
   1565static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
   1566{
   1567	struct amdgpu_device *adev = ring->adev;
   1568
   1569	if (ring == &adev->vcn.inst->ring_enc[0]) {
   1570		if (ring->use_doorbell)
   1571			return *ring->wptr_cpu_addr;
   1572		else
   1573			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
   1574	} else {
   1575		if (ring->use_doorbell)
   1576			return *ring->wptr_cpu_addr;
   1577		else
   1578			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
   1579	}
   1580}
   1581
    1582/**
   1583 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
   1584 *
   1585 * @ring: amdgpu_ring pointer
   1586 *
   1587 * Commits the enc write pointer to the hardware
   1588 */
   1589static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
   1590{
   1591	struct amdgpu_device *adev = ring->adev;
   1592
   1593	if (ring == &adev->vcn.inst->ring_enc[0]) {
   1594		if (ring->use_doorbell) {
   1595			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
   1596			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
   1597		} else {
   1598			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
   1599		}
   1600	} else {
   1601		if (ring->use_doorbell) {
   1602			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
   1603			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
   1604		} else {
   1605			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
   1606		}
   1607	}
   1608}
   1609
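/* Encode ring packets are plain command streams: a VCN_ENC_CMD_* token is
 * written first, immediately followed by its operands, with no PACKET0
 * register headers as on the decode ring.
 */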
   1610/**
   1611 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
   1612 *
   1613 * @ring: amdgpu_ring pointer
    1614 * @addr: GPU address at which the fence sequence number is written
   1615 * @seq: sequence number
   1616 * @flags: fence related flags
   1617 *
    1618 * Write an enc fence and a trap command to the ring.
   1619 */
   1620void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
   1621				u64 seq, unsigned flags)
   1622{
   1623	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
   1624
   1625	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
   1626	amdgpu_ring_write(ring, addr);
   1627	amdgpu_ring_write(ring, upper_32_bits(addr));
   1628	amdgpu_ring_write(ring, seq);
   1629	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
   1630}
   1631
   1632void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
   1633{
   1634	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
   1635}
   1636
   1637/**
   1638 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
   1639 *
   1640 * @ring: amdgpu_ring pointer
    1641 * @job: job to retrieve vmid from
   1642 * @ib: indirect buffer to execute
   1643 * @flags: unused
   1644 *
    1645 * Write enc ring commands to execute the indirect buffer.
   1646 */
   1647void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
   1648			       struct amdgpu_job *job,
   1649			       struct amdgpu_ib *ib,
   1650			       uint32_t flags)
   1651{
   1652	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
   1653
   1654	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
   1655	amdgpu_ring_write(ring, vmid);
   1656	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
   1657	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
   1658	amdgpu_ring_write(ring, ib->length_dw);
   1659}
   1660
   1661void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
   1662				uint32_t val, uint32_t mask)
   1663{
   1664	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
   1665	amdgpu_ring_write(ring, reg << 2);
   1666	amdgpu_ring_write(ring, mask);
   1667	amdgpu_ring_write(ring, val);
   1668}
   1669
   1670void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
   1671				unsigned int vmid, uint64_t pd_addr)
   1672{
   1673	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
   1674
   1675	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
   1676
   1677	/* wait for reg writes */
   1678	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
   1679					vmid * hub->ctx_addr_distance,
   1680					lower_32_bits(pd_addr), 0xffffffff);
   1681}
   1682
   1683void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
   1684{
   1685	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
   1686	amdgpu_ring_write(ring,	reg << 2);
   1687	amdgpu_ring_write(ring, val);
   1688}
   1689
   1690static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
   1691					struct amdgpu_irq_src *source,
   1692					unsigned type,
   1693					enum amdgpu_interrupt_state state)
   1694{
   1695	return 0;
   1696}
   1697
   1698static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
   1699				      struct amdgpu_irq_src *source,
   1700				      struct amdgpu_iv_entry *entry)
   1701{
   1702	DRM_DEBUG("IH: VCN TRAP\n");
   1703
   1704	switch (entry->src_id) {
   1705	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
   1706		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
   1707		break;
   1708	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
   1709		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
   1710		break;
   1711	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
   1712		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
   1713		break;
   1714	default:
   1715		DRM_ERROR("Unhandled interrupt: %d %d\n",
   1716			  entry->src_id, entry->src_data[0]);
   1717		break;
   1718	}
   1719
   1720	return 0;
   1721}
   1722
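/**
 * vcn_v2_0_dec_ring_test_ring - basic decode ring liveness test
 *
 * @ring: amdgpu_ring pointer
 *
 * Seed the external SCRATCH9 register with 0xCAFEDEAD, submit a small packet
 * sequence that writes 0xDEADBEEF to it through the ring, and poll until the
 * new value shows up or the timeout expires.  Skipped under SR-IOV.
 */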
   1723int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
   1724{
   1725	struct amdgpu_device *adev = ring->adev;
   1726	uint32_t tmp = 0;
   1727	unsigned i;
   1728	int r;
   1729
   1730	if (amdgpu_sriov_vf(adev))
   1731		return 0;
   1732
   1733	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
   1734	r = amdgpu_ring_alloc(ring, 4);
   1735	if (r)
   1736		return r;
   1737	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
   1738	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
   1739	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
   1740	amdgpu_ring_write(ring, 0xDEADBEEF);
   1741	amdgpu_ring_commit(ring);
   1742	for (i = 0; i < adev->usec_timeout; i++) {
   1743		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
   1744		if (tmp == 0xDEADBEEF)
   1745			break;
   1746		udelay(1);
   1747	}
   1748
   1749	if (i >= adev->usec_timeout)
   1750		r = -ETIMEDOUT;
   1751
   1752	return r;
   1753}
   1754
   1755
   1756static int vcn_v2_0_set_powergating_state(void *handle,
   1757					  enum amd_powergating_state state)
   1758{
   1759	/* This doesn't actually powergate the VCN block.
   1760	 * That's done in the dpm code via the SMC.  This
   1761	 * just re-inits the block as necessary.  The actual
   1762	 * gating still happens in the dpm code.  We should
    1763	 * revisit this when there is a cleaner boundary
    1764	 * between the SMC and the hw blocks.
    1765	 */
   1766	int ret;
   1767	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1768
   1769	if (amdgpu_sriov_vf(adev)) {
   1770		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
   1771		return 0;
   1772	}
   1773
   1774	if (state == adev->vcn.cur_state)
   1775		return 0;
   1776
   1777	if (state == AMD_PG_STATE_GATE)
   1778		ret = vcn_v2_0_stop(adev);
   1779	else
   1780		ret = vcn_v2_0_start(adev);
   1781
   1782	if (!ret)
   1783		adev->vcn.cur_state = state;
   1784	return ret;
   1785}
   1786
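/**
 * vcn_v2_0_start_mmsch - hand the init table over to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: MM table holding the mmsch_v2_0 init header and register list
 *
 * Program the MMSCH mailbox registers with the GPU address and size of the
 * init table, reset the ring write pointers, kick off the MMSCH and poll the
 * mailbox response until it reports completion.  Returns -EBUSY if the MMSCH
 * does not respond in time.
 */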
   1787static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
   1788				struct amdgpu_mm_table *table)
   1789{
   1790	uint32_t data = 0, loop;
   1791	uint64_t addr = table->gpu_addr;
   1792	struct mmsch_v2_0_init_header *header;
   1793	uint32_t size;
   1794	int i;
   1795
   1796	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
   1797	size = header->header_size + header->vcn_table_size;
   1798
    1799	/* 1, write the GPU mc addr of the memory descriptor location to
    1800	 * the mmMMSCH_VF_CTX_ADDR_LO/HI registers
    1801	 */
   1802	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
   1803	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
   1804
   1805	/* 2, update vmid of descriptor */
   1806	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
   1807	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
   1808	/* use domain0 for MM scheduler */
   1809	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
   1810	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
   1811
   1812	/* 3, notify mmsch about the size of this descriptor */
   1813	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
   1814
   1815	/* 4, set resp to zero */
   1816	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
   1817
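	/* reset all ring write pointers before kicking off the MMSCH */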
   1818	adev->vcn.inst->ring_dec.wptr = 0;
   1819	adev->vcn.inst->ring_dec.wptr_old = 0;
   1820	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
   1821
   1822	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
   1823		adev->vcn.inst->ring_enc[i].wptr = 0;
   1824		adev->vcn.inst->ring_enc[i].wptr_old = 0;
   1825		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
   1826	}
   1827
   1828	/* 5, kick off the initialization and wait until
    1829	 * mmMMSCH_VF_MAILBOX_RESP becomes non-zero
   1830	 */
   1831	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
   1832
   1833	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
   1834	loop = 1000;
   1835	while ((data & 0x10000002) != 0x10000002) {
   1836		udelay(10);
   1837		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
   1838		loop--;
   1839		if (!loop)
   1840			break;
   1841	}
   1842
   1843	if (!loop) {
   1844		DRM_ERROR("failed to init MMSCH, " \
   1845			"mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
   1846		return -EBUSY;
   1847	}
   1848
   1849	return 0;
   1850}
   1851
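/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV via the MMSCH
 *
 * @adev: amdgpu_device pointer
 *
 * If it has not been populated yet, build the mmsch_v2_0 init table in the
 * shared MM table: direct register writes that set up the VCPU cache windows
 * (firmware, stack and context), the encode and decode ring buffers and the
 * RBC control value, terminated by an end packet.  The table is then handed
 * to the MMSCH through vcn_v2_0_start_mmsch().
 */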
   1852static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
   1853{
   1854	int r;
   1855	uint32_t tmp;
   1856	struct amdgpu_ring *ring;
   1857	uint32_t offset, size;
   1858	uint32_t table_size = 0;
   1859	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
   1860	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
   1861	struct mmsch_v2_0_cmd_end end = { {0} };
   1862	struct mmsch_v2_0_init_header *header;
   1863	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
   1864	uint8_t i = 0;
   1865
   1866	header = (struct mmsch_v2_0_init_header *)init_table;
   1867	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
   1868	direct_rd_mod_wt.cmd_header.command_type =
   1869		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
   1870	end.cmd_header.command_type = MMSCH_COMMAND__END;
   1871
   1872	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
   1873		header->version = MMSCH_VERSION;
   1874		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
   1875
   1876		header->vcn_table_offset = header->header_size;
   1877
   1878		init_table += header->vcn_table_offset;
   1879
   1880		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
   1881
   1882		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
   1883			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
   1884			0xFFFFFFFF, 0x00000004);
   1885
    1886		/* mc resume */
   1887		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
   1888			MMSCH_V2_0_INSERT_DIRECT_WT(
   1889				SOC15_REG_OFFSET(UVD, i,
   1890					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
   1891				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
   1892			MMSCH_V2_0_INSERT_DIRECT_WT(
   1893				SOC15_REG_OFFSET(UVD, i,
   1894					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
   1895				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
   1896			offset = 0;
   1897		} else {
   1898			MMSCH_V2_0_INSERT_DIRECT_WT(
   1899				SOC15_REG_OFFSET(UVD, i,
   1900					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
   1901				lower_32_bits(adev->vcn.inst->gpu_addr));
   1902			MMSCH_V2_0_INSERT_DIRECT_WT(
   1903				SOC15_REG_OFFSET(UVD, i,
   1904					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
   1905				upper_32_bits(adev->vcn.inst->gpu_addr));
   1906			offset = size;
   1907		}
   1908
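		/* cache window 0: the firmware image */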
   1909		MMSCH_V2_0_INSERT_DIRECT_WT(
   1910			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
   1911			0);
   1912		MMSCH_V2_0_INSERT_DIRECT_WT(
   1913			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
   1914			size);
   1915
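		/* cache window 1: the VCPU stack */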
   1916		MMSCH_V2_0_INSERT_DIRECT_WT(
   1917			SOC15_REG_OFFSET(UVD, i,
   1918				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
   1919			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
   1920		MMSCH_V2_0_INSERT_DIRECT_WT(
   1921			SOC15_REG_OFFSET(UVD, i,
   1922				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
   1923			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
   1924		MMSCH_V2_0_INSERT_DIRECT_WT(
   1925			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
   1926			0);
   1927		MMSCH_V2_0_INSERT_DIRECT_WT(
   1928			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
   1929			AMDGPU_VCN_STACK_SIZE);
   1930
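		/* cache window 2: the VCPU context */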
   1931		MMSCH_V2_0_INSERT_DIRECT_WT(
   1932			SOC15_REG_OFFSET(UVD, i,
   1933				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
   1934			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
   1935				AMDGPU_VCN_STACK_SIZE));
   1936		MMSCH_V2_0_INSERT_DIRECT_WT(
   1937			SOC15_REG_OFFSET(UVD, i,
   1938				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
   1939			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
   1940				AMDGPU_VCN_STACK_SIZE));
   1941		MMSCH_V2_0_INSERT_DIRECT_WT(
   1942			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
   1943			0);
   1944		MMSCH_V2_0_INSERT_DIRECT_WT(
   1945			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
   1946			AMDGPU_VCN_CONTEXT_SIZE);
   1947
   1948		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
   1949			ring = &adev->vcn.inst->ring_enc[r];
   1950			ring->wptr = 0;
   1951			MMSCH_V2_0_INSERT_DIRECT_WT(
   1952				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
   1953				lower_32_bits(ring->gpu_addr));
   1954			MMSCH_V2_0_INSERT_DIRECT_WT(
   1955				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
   1956				upper_32_bits(ring->gpu_addr));
   1957			MMSCH_V2_0_INSERT_DIRECT_WT(
   1958				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
   1959				ring->ring_size / 4);
   1960		}
   1961
   1962		ring = &adev->vcn.inst->ring_dec;
   1963		ring->wptr = 0;
   1964		MMSCH_V2_0_INSERT_DIRECT_WT(
   1965			SOC15_REG_OFFSET(UVD, i,
   1966				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
   1967			lower_32_bits(ring->gpu_addr));
   1968		MMSCH_V2_0_INSERT_DIRECT_WT(
   1969			SOC15_REG_OFFSET(UVD, i,
   1970				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
   1971			upper_32_bits(ring->gpu_addr));
   1972		/* force RBC into idle state */
   1973		tmp = order_base_2(ring->ring_size);
   1974		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
   1975		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
   1976		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
   1977		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
   1978		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
   1979		MMSCH_V2_0_INSERT_DIRECT_WT(
   1980			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
   1981
   1982		/* add end packet */
   1983		tmp = sizeof(struct mmsch_v2_0_cmd_end);
   1984		memcpy((void *)init_table, &end, tmp);
   1985		table_size += (tmp / 4);
   1986		header->vcn_table_size = table_size;
   1987
   1988	}
   1989	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
   1990}
   1991
   1992static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
   1993	.name = "vcn_v2_0",
   1994	.early_init = vcn_v2_0_early_init,
   1995	.late_init = NULL,
   1996	.sw_init = vcn_v2_0_sw_init,
   1997	.sw_fini = vcn_v2_0_sw_fini,
   1998	.hw_init = vcn_v2_0_hw_init,
   1999	.hw_fini = vcn_v2_0_hw_fini,
   2000	.suspend = vcn_v2_0_suspend,
   2001	.resume = vcn_v2_0_resume,
   2002	.is_idle = vcn_v2_0_is_idle,
   2003	.wait_for_idle = vcn_v2_0_wait_for_idle,
   2004	.check_soft_reset = NULL,
   2005	.pre_soft_reset = NULL,
   2006	.soft_reset = NULL,
   2007	.post_soft_reset = NULL,
   2008	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
   2009	.set_powergating_state = vcn_v2_0_set_powergating_state,
   2010};
   2011
   2012static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
   2013	.type = AMDGPU_RING_TYPE_VCN_DEC,
   2014	.align_mask = 0xf,
   2015	.secure_submission_supported = true,
   2016	.vmhub = AMDGPU_MMHUB_0,
   2017	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
   2018	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
   2019	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
   2020	.emit_frame_size =
   2021		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
   2022		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
   2023		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
   2024		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
   2025		6,
   2026	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
   2027	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
   2028	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
   2029	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
   2030	.test_ring = vcn_v2_0_dec_ring_test_ring,
   2031	.test_ib = amdgpu_vcn_dec_ring_test_ib,
   2032	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
   2033	.insert_start = vcn_v2_0_dec_ring_insert_start,
   2034	.insert_end = vcn_v2_0_dec_ring_insert_end,
   2035	.pad_ib = amdgpu_ring_generic_pad_ib,
   2036	.begin_use = amdgpu_vcn_ring_begin_use,
   2037	.end_use = amdgpu_vcn_ring_end_use,
   2038	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
   2039	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
   2040	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
   2041};
   2042
   2043static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
   2044	.type = AMDGPU_RING_TYPE_VCN_ENC,
   2045	.align_mask = 0x3f,
   2046	.nop = VCN_ENC_CMD_NO_OP,
   2047	.vmhub = AMDGPU_MMHUB_0,
   2048	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
   2049	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
   2050	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
   2051	.emit_frame_size =
   2052		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
   2053		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
   2054		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
   2055		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
   2056		1, /* vcn_v2_0_enc_ring_insert_end */
   2057	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
   2058	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
   2059	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
   2060	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
   2061	.test_ring = amdgpu_vcn_enc_ring_test_ring,
   2062	.test_ib = amdgpu_vcn_enc_ring_test_ib,
   2063	.insert_nop = amdgpu_ring_insert_nop,
   2064	.insert_end = vcn_v2_0_enc_ring_insert_end,
   2065	.pad_ib = amdgpu_ring_generic_pad_ib,
   2066	.begin_use = amdgpu_vcn_ring_begin_use,
   2067	.end_use = amdgpu_vcn_ring_end_use,
   2068	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
   2069	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
   2070	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
   2071};
   2072
   2073static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
   2074{
   2075	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
   2076	DRM_INFO("VCN decode is enabled in VM mode\n");
   2077}
   2078
   2079static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
   2080{
   2081	int i;
   2082
   2083	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
   2084		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
   2085
   2086	DRM_INFO("VCN encode is enabled in VM mode\n");
   2087}
   2088
   2089static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
   2090	.set = vcn_v2_0_set_interrupt_state,
   2091	.process = vcn_v2_0_process_interrupt,
   2092};
   2093
   2094static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
   2095{
   2096	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
   2097	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
   2098}
   2099
   2100const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
   2101{
   2102		.type = AMD_IP_BLOCK_TYPE_VCN,
   2103		.major = 2,
   2104		.minor = 0,
   2105		.rev = 0,
   2106		.funcs = &vcn_v2_0_ip_funcs,
   2107};