cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vce_v2_0.c (16524B)


/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

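/*
 * The VCE BO holds the firmware image, the VCPU stack and the
 * per-handle data segments back to back; vce_v2_0_mc_resume() derives
 * the three cache-window offsets by accumulating these sizes in order.
 */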
#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

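/*
 * Poll VCE_LMI_STATUS until any of the (undocumented) clean/idle bits
 * in 0x337f are set, for at most 10 * 100 * 10 ms = 10 seconds.
 * Returns 0 once the LMI reports clean, -ETIMEDOUT otherwise.
 */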
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

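/*
 * Wait for the VCPU firmware to report itself loaded via VCE_STATUS.
 * After each ~1 s round of polling the ECPU is soft-reset and the wait
 * retried, for up to 10 rounds.
 */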
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

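/* Force the VCE clocks on by overriding clock gating; the low three
 * bits of VCE_CGTT_CLK_OVERRIDE are assumed to cover the gated
 * domains. */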
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

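/**
 * vce_v2_0_mc_resume - program the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Lets the VCE memory interface know where the firmware lives: the
 * three VCPU cache windows are pointed at the firmware image, stack
 * and data regions of the VCE BO, in that order.
 */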
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size, offset;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

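/* Idleness is reported by the SRBM rather than by VCE itself; note
 * that vce_v2_0_wait_for_idle() below busy-polls without any delay
 * between reads. */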
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        vce_v2_0_init_cg(adev);
        vce_v2_0_disable_cg(adev);

        vce_v2_0_mc_resume(adev);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

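/**
 * vce_v2_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Bails out (reporting success) if the LMI never reports clean or the
 * block stays busy; otherwise stalls the UMC and register bus, halts
 * the VCPU and puts the block into soft reset.
 */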
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
        int i;
        int status;

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("VCE is not idle\n");
                return 0;
        }

        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating\n");
                return 0;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 100; ++i) {
                status = RREG32(mmVCE_LMI_STATUS);
                if (status & 0x240)
                        break;
                mdelay(1);
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

        WREG32(mmVCE_STATUS, 0);

        return 0;
}

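/*
 * Software-controlled clock gating: set or clear the SW gate bits for
 * the ECPU/IH/SEM/SYS and UENC clocks (bit meanings inferred from the
 * comments in vce_v2_0_set_dyn_cg() below).
 */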
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        /* LMI_MC/LMI_UMC always set in dynamic,
         * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
         */
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;

        /* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
        if (gated) {
                tmp |= 0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        }

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

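/* Medium-grain clock gating is only engaged when the device
 * advertises AMD_CG_SUPPORT_VCE_MGCG; otherwise the CGTT override
 * forces the clocks on before the gate bits are cleared. */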
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
                                 bool sw_cg)
{
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.num_rings = 2;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

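/**
 * vce_v2_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the VCE interrupt source (legacy client, source id 167),
 * allocates the firmware/stack/data BO and initializes both rings.
 */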
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vce.num_rings; i++) {
                enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
                                     hw_prio, NULL);
                if (r)
                        return r;
        }

        r = amdgpu_vce_entity_init(adev);

        return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return amdgpu_vce_sw_fini(adev);
}

static int vce_v2_0_hw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);

        for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        cancel_delayed_work_sync(&adev->vce.idle_work);

        return 0;
}

static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /*
         * Proper cleanups before halting the HW engine:
         *   - cancel the delayed idle work
         *   - enable powergating
         *   - enable clockgating
         *   - disable dpm
         *
         * TODO: to align with the VCN implementation, move the
         * jobs for clockgating/powergating/dpm setting to
         * ->set_powergating_state().
         */
        cancel_delayed_work_sync(&adev->vce.idle_work);

        if (adev->pm.dpm_enabled) {
                amdgpu_dpm_enable_vce(adev, false);
        } else {
                amdgpu_asic_set_vce_clocks(adev, 0, 0);
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                       AMD_PG_STATE_GATE);
                amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                       AMD_CG_STATE_GATE);
        }

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        return vce_v2_0_hw_init(adev);
}

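/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Asserts the SRBM soft reset for VCE, waits 5 ms, then reinitializes
 * the block via vce_v2_0_start().
 */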
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data[0]) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

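/* AMD_CG_STATE_GATE selects the software-controlled gating path in
 * vce_v2_0_enable_mgcg(); any other state disables clock gating. */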
static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        bool sw_cg = false;

        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE) {
                gate = true;
                sw_cg = true;
        }

        vce_v2_0_enable_mgcg(adev, gate, sw_cg);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks.
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                return vce_v2_0_stop(adev);
        else
                return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
        .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

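/* Both rings share one vtable; ring->me selects between the
 * RB_RPTR/RB_WPTR and RB_RPTR2/RB_WPTR2 register pairs above. */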
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vce.num_rings; i++) {
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
                adev->vce.ring[i].me = i;
        }
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCE,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &vce_v2_0_ip_funcs,
};