cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uvd_v7_0.c (57045B)


      1/*
      2 * Copyright 2016 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include <linux/firmware.h>
     25
     26#include "amdgpu.h"
     27#include "amdgpu_uvd.h"
     28#include "amdgpu_cs.h"
     29#include "soc15.h"
     30#include "soc15d.h"
     31#include "soc15_common.h"
     32#include "mmsch_v1_0.h"
     33
     34#include "uvd/uvd_7_0_offset.h"
     35#include "uvd/uvd_7_0_sh_mask.h"
     36#include "vce/vce_4_0_offset.h"
     37#include "vce/vce_4_0_default.h"
     38#include "vce/vce_4_0_sh_mask.h"
     39#include "nbif/nbif_6_1_offset.h"
     40#include "mmhub/mmhub_1_0_offset.h"
     41#include "mmhub/mmhub_1_0_sh_mask.h"
     42#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
     43
     44#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
     45#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
     46//UVD_PG0_CC_UVD_HARVESTING
     47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
     48#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
     49
     50#define UVD7_MAX_HW_INSTANCES_VEGA20			2
     51
     52static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
     53static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
     54static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
     55static int uvd_v7_0_start(struct amdgpu_device *adev);
     56static void uvd_v7_0_stop(struct amdgpu_device *adev);
     57static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
     58
     59static int amdgpu_ih_clientid_uvds[] = {
     60	SOC15_IH_CLIENTID_UVD,
     61	SOC15_IH_CLIENTID_UVD1
     62};
     63
     64/**
     65 * uvd_v7_0_ring_get_rptr - get read pointer
     66 *
     67 * @ring: amdgpu_ring pointer
     68 *
     69 * Returns the current hardware read pointer
     70 */
     71static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
     72{
     73	struct amdgpu_device *adev = ring->adev;
     74
     75	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
     76}
     77
     78/**
     79 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
     80 *
     81 * @ring: amdgpu_ring pointer
     82 *
     83 * Returns the current hardware enc read pointer
     84 */
     85static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
     86{
     87	struct amdgpu_device *adev = ring->adev;
     88
     89	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
     90		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
     91	else
     92		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
     93}
     94
     95/**
     96 * uvd_v7_0_ring_get_wptr - get write pointer
     97 *
     98 * @ring: amdgpu_ring pointer
     99 *
    100 * Returns the current hardware write pointer
    101 */
    102static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
    103{
    104	struct amdgpu_device *adev = ring->adev;
    105
    106	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
    107}
    108
    109/**
    110 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
    111 *
    112 * @ring: amdgpu_ring pointer
    113 *
    114 * Returns the current hardware enc write pointer
    115 */
    116static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
    117{
    118	struct amdgpu_device *adev = ring->adev;
    119
    120	if (ring->use_doorbell)
    121		return *ring->wptr_cpu_addr;
    122
    123	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
    124		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
    125	else
    126		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
    127}
    128
    129/**
    130 * uvd_v7_0_ring_set_wptr - set write pointer
    131 *
    132 * @ring: amdgpu_ring pointer
    133 *
    134 * Commits the write pointer to the hardware
    135 */
    136static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
    137{
    138	struct amdgpu_device *adev = ring->adev;
    139
    140	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
    141}
    142
    143/**
    144 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
    145 *
    146 * @ring: amdgpu_ring pointer
    147 *
    148 * Commits the enc write pointer to the hardware
    149 */
    150static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
    151{
    152	struct amdgpu_device *adev = ring->adev;
    153
    154	if (ring->use_doorbell) {
    155		/* XXX check if swapping is necessary on BE */
    156		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
    157		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
    158		return;
    159	}
    160
    161	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
    162		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
    163			lower_32_bits(ring->wptr));
    164	else
    165		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
    166			lower_32_bits(ring->wptr));
    167}
    168
    169/**
    170 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
    171 *
    172 * @ring: the engine to test on
    173 *
    174 */
    175static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
    176{
    177	struct amdgpu_device *adev = ring->adev;
    178	uint32_t rptr;
    179	unsigned i;
    180	int r;
    181
    182	if (amdgpu_sriov_vf(adev))
    183		return 0;
    184
    185	r = amdgpu_ring_alloc(ring, 16);
    186	if (r)
    187		return r;
    188
    189	rptr = amdgpu_ring_get_rptr(ring);
    190
    191	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
    192	amdgpu_ring_commit(ring);
    193
    194	for (i = 0; i < adev->usec_timeout; i++) {
    195		if (amdgpu_ring_get_rptr(ring) != rptr)
    196			break;
    197		udelay(1);
    198	}
    199
    200	if (i >= adev->usec_timeout)
    201		r = -ETIMEDOUT;
    202
    203	return r;
    204}
    205
    206/**
    207 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
    208 *
    209 * @ring: ring we should submit the msg to
    210 * @handle: session handle to use
    211 * @bo: amdgpu object for which we query the offset
    212 * @fence: optional fence to return
    213 *
    214 * Open up a stream for HW test
    215 */
    216static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
    217				       struct amdgpu_bo *bo,
    218				       struct dma_fence **fence)
    219{
    220	const unsigned ib_size_dw = 16;
    221	struct amdgpu_job *job;
    222	struct amdgpu_ib *ib;
    223	struct dma_fence *f = NULL;
    224	uint64_t addr;
    225	int i, r;
    226
    227	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
    228					AMDGPU_IB_POOL_DIRECT, &job);
    229	if (r)
    230		return r;
    231
    232	ib = &job->ibs[0];
    233	addr = amdgpu_bo_gpu_offset(bo);
    234
    235	ib->length_dw = 0;
    236	ib->ptr[ib->length_dw++] = 0x00000018;
    237	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
    238	ib->ptr[ib->length_dw++] = handle;
    239	ib->ptr[ib->length_dw++] = 0x00000000;
    240	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
    241	ib->ptr[ib->length_dw++] = addr;
    242
    243	ib->ptr[ib->length_dw++] = 0x00000014;
    244	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
    245	ib->ptr[ib->length_dw++] = 0x0000001c;
    246	ib->ptr[ib->length_dw++] = 0x00000000;
    247	ib->ptr[ib->length_dw++] = 0x00000000;
    248
    249	ib->ptr[ib->length_dw++] = 0x00000008;
    250	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
    251
    252	for (i = ib->length_dw; i < ib_size_dw; ++i)
    253		ib->ptr[i] = 0x0;
    254
    255	r = amdgpu_job_submit_direct(job, ring, &f);
    256	if (r)
    257		goto err;
    258
    259	if (fence)
    260		*fence = dma_fence_get(f);
    261	dma_fence_put(f);
    262	return 0;
    263
    264err:
    265	amdgpu_job_free(job);
    266	return r;
    267}
    268
    269/**
    270 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
    271 *
    272 * @ring: ring we should submit the msg to
    273 * @handle: session handle to use
    274 * @bo: amdgpu object for which we query the offset
    275 * @fence: optional fence to return
    276 *
    277 * Close up a stream for HW test or if userspace failed to do so
    278 */
    279static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
    280					struct amdgpu_bo *bo,
    281					struct dma_fence **fence)
    282{
    283	const unsigned ib_size_dw = 16;
    284	struct amdgpu_job *job;
    285	struct amdgpu_ib *ib;
    286	struct dma_fence *f = NULL;
    287	uint64_t addr;
    288	int i, r;
    289
    290	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
    291					AMDGPU_IB_POOL_DIRECT, &job);
    292	if (r)
    293		return r;
    294
    295	ib = &job->ibs[0];
    296	addr = amdgpu_bo_gpu_offset(bo);
    297
    298	ib->length_dw = 0;
    299	ib->ptr[ib->length_dw++] = 0x00000018;
    300	ib->ptr[ib->length_dw++] = 0x00000001;
    301	ib->ptr[ib->length_dw++] = handle;
    302	ib->ptr[ib->length_dw++] = 0x00000000;
    303	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
    304	ib->ptr[ib->length_dw++] = addr;
    305
    306	ib->ptr[ib->length_dw++] = 0x00000014;
    307	ib->ptr[ib->length_dw++] = 0x00000002;
    308	ib->ptr[ib->length_dw++] = 0x0000001c;
    309	ib->ptr[ib->length_dw++] = 0x00000000;
    310	ib->ptr[ib->length_dw++] = 0x00000000;
    311
    312	ib->ptr[ib->length_dw++] = 0x00000008;
    313	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
    314
    315	for (i = ib->length_dw; i < ib_size_dw; ++i)
    316		ib->ptr[i] = 0x0;
    317
    318	r = amdgpu_job_submit_direct(job, ring, &f);
    319	if (r)
    320		goto err;
    321
    322	if (fence)
    323		*fence = dma_fence_get(f);
    324	dma_fence_put(f);
    325	return 0;
    326
    327err:
    328	amdgpu_job_free(job);
    329	return r;
    330}
    331
    332/**
    333 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
    334 *
    335 * @ring: the engine to test on
    336 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
    337 *
    338 */
    339static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
    340{
    341	struct dma_fence *fence = NULL;
    342	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
    343	long r;
    344
    345	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
    346	if (r)
    347		goto error;
    348
    349	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
    350	if (r)
    351		goto error;
    352
    353	r = dma_fence_wait_timeout(fence, false, timeout);
    354	if (r == 0)
    355		r = -ETIMEDOUT;
    356	else if (r > 0)
    357		r = 0;
    358
    359error:
    360	dma_fence_put(fence);
    361	return r;
    362}
    363
    364static int uvd_v7_0_early_init(void *handle)
    365{
    366	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    367
    368	if (adev->asic_type == CHIP_VEGA20) {
    369		u32 harvest;
    370		int i;
    371
    372		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
    373		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
    374			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
    375			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
    376				adev->uvd.harvest_config |= 1 << i;
    377			}
    378		}
    379		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
    380						 AMDGPU_UVD_HARVEST_UVD1))
    381			/* both instances are harvested, disable the block */
    382			return -ENOENT;
    383	} else {
    384		adev->uvd.num_uvd_inst = 1;
    385	}
    386
    387	if (amdgpu_sriov_vf(adev))
    388		adev->uvd.num_enc_rings = 1;
    389	else
    390		adev->uvd.num_enc_rings = 2;
    391	uvd_v7_0_set_ring_funcs(adev);
    392	uvd_v7_0_set_enc_ring_funcs(adev);
    393	uvd_v7_0_set_irq_funcs(adev);
    394
    395	return 0;
    396}
    397
    398static int uvd_v7_0_sw_init(void *handle)
    399{
    400	struct amdgpu_ring *ring;
    401
    402	int i, j, r;
    403	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    404
    405	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
    406		if (adev->uvd.harvest_config & (1 << j))
    407			continue;
    408		/* UVD TRAP */
    409		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
    410		if (r)
    411			return r;
    412
    413		/* UVD ENC TRAP */
    414		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
    415			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
    416			if (r)
    417				return r;
    418		}
    419	}
    420
    421	r = amdgpu_uvd_sw_init(adev);
    422	if (r)
    423		return r;
    424
    425	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    426		const struct common_firmware_header *hdr;
    427		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
    428		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
    429		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
    430		adev->firmware.fw_size +=
    431			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
    432
    433		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
    434			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
    435			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
    436			adev->firmware.fw_size +=
    437				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
    438		}
    439		DRM_INFO("PSP loading UVD firmware\n");
    440	}
    441
    442	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
    443		if (adev->uvd.harvest_config & (1 << j))
    444			continue;
    445		if (!amdgpu_sriov_vf(adev)) {
    446			ring = &adev->uvd.inst[j].ring;
    447			sprintf(ring->name, "uvd_%d", ring->me);
    448			r = amdgpu_ring_init(adev, ring, 512,
    449					     &adev->uvd.inst[j].irq, 0,
    450					     AMDGPU_RING_PRIO_DEFAULT, NULL);
    451			if (r)
    452				return r;
    453		}
    454
    455		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
    456			ring = &adev->uvd.inst[j].ring_enc[i];
    457			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
    458			if (amdgpu_sriov_vf(adev)) {
    459				ring->use_doorbell = true;
    460
     461				/* currently only use the first encoding ring for
     462				 * sriov, so set an unused location for the other rings.
     463				 */
    464				if (i == 0)
    465					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
    466				else
    467					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
    468			}
    469			r = amdgpu_ring_init(adev, ring, 512,
    470					     &adev->uvd.inst[j].irq, 0,
    471					     AMDGPU_RING_PRIO_DEFAULT, NULL);
    472			if (r)
    473				return r;
    474		}
    475	}
    476
    477	r = amdgpu_uvd_resume(adev);
    478	if (r)
    479		return r;
    480
    481	r = amdgpu_uvd_entity_init(adev);
    482	if (r)
    483		return r;
    484
    485	r = amdgpu_virt_alloc_mm_table(adev);
    486	if (r)
    487		return r;
    488
    489	return r;
    490}
    491
    492static int uvd_v7_0_sw_fini(void *handle)
    493{
    494	int i, j, r;
    495	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    496
    497	amdgpu_virt_free_mm_table(adev);
    498
    499	r = amdgpu_uvd_suspend(adev);
    500	if (r)
    501		return r;
    502
    503	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
    504		if (adev->uvd.harvest_config & (1 << j))
    505			continue;
    506		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
    507			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
    508	}
    509	return amdgpu_uvd_sw_fini(adev);
    510}
    511
    512/**
    513 * uvd_v7_0_hw_init - start and test UVD block
    514 *
    515 * @handle: handle used to pass amdgpu_device pointer
    516 *
    517 * Initialize the hardware, boot up the VCPU and do some testing
    518 */
    519static int uvd_v7_0_hw_init(void *handle)
    520{
    521	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    522	struct amdgpu_ring *ring;
    523	uint32_t tmp;
    524	int i, j, r;
    525
    526	if (amdgpu_sriov_vf(adev))
    527		r = uvd_v7_0_sriov_start(adev);
    528	else
    529		r = uvd_v7_0_start(adev);
    530	if (r)
    531		goto done;
    532
    533	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
    534		if (adev->uvd.harvest_config & (1 << j))
    535			continue;
    536		ring = &adev->uvd.inst[j].ring;
    537
    538		if (!amdgpu_sriov_vf(adev)) {
    539			r = amdgpu_ring_test_helper(ring);
    540			if (r)
    541				goto done;
    542
    543			r = amdgpu_ring_alloc(ring, 10);
    544			if (r) {
    545				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
    546				goto done;
    547			}
    548
    549			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
    550				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
    551			amdgpu_ring_write(ring, tmp);
    552			amdgpu_ring_write(ring, 0xFFFFF);
    553
    554			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
    555				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
    556			amdgpu_ring_write(ring, tmp);
    557			amdgpu_ring_write(ring, 0xFFFFF);
    558
    559			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
    560				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
    561			amdgpu_ring_write(ring, tmp);
    562			amdgpu_ring_write(ring, 0xFFFFF);
    563
    564			/* Clear timeout status bits */
    565			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
    566				mmUVD_SEMA_TIMEOUT_STATUS), 0));
    567			amdgpu_ring_write(ring, 0x8);
    568
    569			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
    570				mmUVD_SEMA_CNTL), 0));
    571			amdgpu_ring_write(ring, 3);
    572
    573			amdgpu_ring_commit(ring);
    574		}
    575
    576		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
    577			ring = &adev->uvd.inst[j].ring_enc[i];
    578			r = amdgpu_ring_test_helper(ring);
    579			if (r)
    580				goto done;
    581		}
    582	}
    583done:
    584	if (!r)
    585		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
    586
    587	return r;
    588}
    589
    590/**
    591 * uvd_v7_0_hw_fini - stop the hardware block
    592 *
    593 * @handle: handle used to pass amdgpu_device pointer
    594 *
    595 * Stop the UVD block, mark ring as not ready any more
    596 */
    597static int uvd_v7_0_hw_fini(void *handle)
    598{
    599	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    600
    601	cancel_delayed_work_sync(&adev->uvd.idle_work);
    602
    603	if (!amdgpu_sriov_vf(adev))
    604		uvd_v7_0_stop(adev);
    605	else {
    606		/* full access mode, so don't touch any UVD register */
    607		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
    608	}
    609
    610	return 0;
    611}
    612
    613static int uvd_v7_0_suspend(void *handle)
    614{
    615	int r;
    616	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    617
    618	/*
    619	 * Proper cleanups before halting the HW engine:
    620	 *   - cancel the delayed idle work
    621	 *   - enable powergating
    622	 *   - enable clockgating
    623	 *   - disable dpm
    624	 *
    625	 * TODO: to align with the VCN implementation, move the
    626	 * jobs for clockgating/powergating/dpm setting to
    627	 * ->set_powergating_state().
    628	 */
    629	cancel_delayed_work_sync(&adev->uvd.idle_work);
    630
    631	if (adev->pm.dpm_enabled) {
    632		amdgpu_dpm_enable_uvd(adev, false);
    633	} else {
    634		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
    635		/* shutdown the UVD block */
    636		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
    637						       AMD_PG_STATE_GATE);
    638		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
    639						       AMD_CG_STATE_GATE);
    640	}
    641
    642	r = uvd_v7_0_hw_fini(adev);
    643	if (r)
    644		return r;
    645
    646	return amdgpu_uvd_suspend(adev);
    647}
    648
    649static int uvd_v7_0_resume(void *handle)
    650{
    651	int r;
    652	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    653
    654	r = amdgpu_uvd_resume(adev);
    655	if (r)
    656		return r;
    657
    658	return uvd_v7_0_hw_init(adev);
    659}
    660
    661/**
    662 * uvd_v7_0_mc_resume - memory controller programming
    663 *
    664 * @adev: amdgpu_device pointer
    665 *
     666 * Let the UVD memory controller know its offsets
    667 */
    668static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
    669{
    670	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
    671	uint32_t offset;
    672	int i;
    673
    674	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
    675		if (adev->uvd.harvest_config & (1 << i))
    676			continue;
    677		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    678			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
    679				i == 0 ?
    680				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
    681				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
    682			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
    683				i == 0 ?
    684				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
    685				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
    686			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
    687			offset = 0;
    688		} else {
    689			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
    690				lower_32_bits(adev->uvd.inst[i].gpu_addr));
    691			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
    692				upper_32_bits(adev->uvd.inst[i].gpu_addr));
    693			offset = size;
    694			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
    695					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
    696		}
    697
    698		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
    699
    700		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
    701				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
    702		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
    703				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
    704		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
    705		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
    706
    707		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
    708				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
    709		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
    710				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
    711		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
    712		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
    713				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
    714
    715		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
    716				adev->gfx.config.gb_addr_config);
    717		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
    718				adev->gfx.config.gb_addr_config);
    719		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
    720				adev->gfx.config.gb_addr_config);
    721
    722		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
    723	}
    724}
    725
    726static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
    727				struct amdgpu_mm_table *table)
    728{
    729	uint32_t data = 0, loop;
    730	uint64_t addr = table->gpu_addr;
    731	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
    732	uint32_t size;
    733	int i;
    734
    735	size = header->header_size + header->vce_table_size + header->uvd_table_size;
    736
    737	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
    738	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
    739	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
    740
    741	/* 2, update vmid of descriptor */
    742	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
    743	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
    744	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
    745	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
    746
    747	/* 3, notify mmsch about the size of this descriptor */
    748	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
    749
    750	/* 4, set resp to zero */
    751	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
    752
    753	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
    754		if (adev->uvd.harvest_config & (1 << i))
    755			continue;
    756		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
    757		*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
    758		adev->uvd.inst[i].ring_enc[0].wptr = 0;
    759		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
    760	}
    761	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
    762	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
    763
    764	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
    765	loop = 1000;
    766	while ((data & 0x10000002) != 0x10000002) {
    767		udelay(10);
    768		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
    769		loop--;
    770		if (!loop)
    771			break;
    772	}
    773
    774	if (!loop) {
    775		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
    776		return -EBUSY;
    777	}
    778
    779	return 0;
    780}
    781
    782static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
    783{
    784	struct amdgpu_ring *ring;
    785	uint32_t offset, size, tmp;
    786	uint32_t table_size = 0;
    787	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
    788	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
    789	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
    790	struct mmsch_v1_0_cmd_end end = { {0} };
    791	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
    792	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
    793	uint8_t i = 0;
    794
    795	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
    796	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
    797	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
    798	end.cmd_header.command_type = MMSCH_COMMAND__END;
    799
    800	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
    801		header->version = MMSCH_VERSION;
    802		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
    803
    804		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
    805			header->uvd_table_offset = header->header_size;
    806		else
    807			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
    808
    809		init_table += header->uvd_table_offset;
    810
    811		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
    812			if (adev->uvd.harvest_config & (1 << i))
    813				continue;
    814			ring = &adev->uvd.inst[i].ring;
    815			ring->wptr = 0;
    816			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
    817
    818			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
    819							   0xFFFFFFFF, 0x00000004);
     820			/* mc resume */
    821			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    822				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
    823							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
    824							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
    825				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
    826							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
    827							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
    828				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
    829				offset = 0;
    830			} else {
    831				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
    832							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
    833				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
    834							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
    835				offset = size;
    836				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
    837							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
    838
    839			}
    840
    841			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
    842
    843			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
    844						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
    845			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
    846						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
    847			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
    848			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
    849
    850			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
    851						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
    852			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
    853						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
    854			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
    855			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
    856						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
    857
    858			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
     859			/* mc resume end */
    860
    861			/* disable clock gating */
    862			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
    863							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
    864
     865			/* disable interrupt */
    866			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
    867							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
    868
    869			/* stall UMC and register bus before resetting VCPU */
    870			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
    871							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
    872							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
    873
    874			/* put LMI, VCPU, RBC etc... into reset */
    875			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
    876						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
    877							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
    878							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
    879							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
    880							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
    881							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
    882							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
    883							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
    884
    885			/* initialize UVD memory controller */
    886			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
    887						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
    888							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
    889							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
    890							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
    891							       UVD_LMI_CTRL__REQ_MODE_MASK |
    892							       0x00100000L));
    893
    894			/* take all subblocks out of reset, except VCPU */
    895			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
    896						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
    897
    898			/* enable VCPU clock */
    899			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
    900						    UVD_VCPU_CNTL__CLK_EN_MASK);
    901
    902			/* enable master interrupt */
    903			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
    904							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
    905							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
    906
    907			/* clear the bit 4 of UVD_STATUS */
    908			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
    909							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
    910
    911			/* force RBC into idle state */
    912			size = order_base_2(ring->ring_size);
    913			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
    914			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
    915			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
    916
    917			ring = &adev->uvd.inst[i].ring_enc[0];
    918			ring->wptr = 0;
    919			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
    920			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
    921			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
    922
    923			/* boot up the VCPU */
    924			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
    925
    926			/* enable UMC */
    927			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
    928											   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
    929
    930			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
    931		}
    932		/* add end packet */
    933		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
    934		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
    935		header->uvd_table_size = table_size;
    936
    937	}
    938	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
    939}
    940
    941/**
    942 * uvd_v7_0_start - start UVD block
    943 *
    944 * @adev: amdgpu_device pointer
    945 *
    946 * Setup and start the UVD block
    947 */
    948static int uvd_v7_0_start(struct amdgpu_device *adev)
    949{
    950	struct amdgpu_ring *ring;
    951	uint32_t rb_bufsz, tmp;
    952	uint32_t lmi_swap_cntl;
    953	uint32_t mp_swap_cntl;
    954	int i, j, k, r;
    955
    956	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
    957		if (adev->uvd.harvest_config & (1 << k))
    958			continue;
    959		/* disable DPG */
    960		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
    961				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
    962	}
    963
    964	/* disable byte swapping */
    965	lmi_swap_cntl = 0;
    966	mp_swap_cntl = 0;
    967
    968	uvd_v7_0_mc_resume(adev);
    969
    970	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
    971		if (adev->uvd.harvest_config & (1 << k))
    972			continue;
    973		ring = &adev->uvd.inst[k].ring;
    974		/* disable clock gating */
    975		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
    976				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
    977
     978		/* disable interrupt */
    979		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
    980				~UVD_MASTINT_EN__VCPU_EN_MASK);
    981
    982		/* stall UMC and register bus before resetting VCPU */
    983		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
    984				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
    985				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
    986		mdelay(1);
    987
    988		/* put LMI, VCPU, RBC etc... into reset */
    989		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
    990			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
    991			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
    992			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
    993			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
    994			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
    995			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
    996			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
    997			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
    998		mdelay(5);
    999
   1000		/* initialize UVD memory controller */
   1001		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
   1002			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
   1003			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
   1004			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
   1005			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
   1006			UVD_LMI_CTRL__REQ_MODE_MASK |
   1007			0x00100000L);
   1008
   1009#ifdef __BIG_ENDIAN
   1010		/* swap (8 in 32) RB and IB */
   1011		lmi_swap_cntl = 0xa;
   1012		mp_swap_cntl = 0;
   1013#endif
   1014		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
   1015		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
   1016
   1017		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
   1018		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
   1019		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
   1020		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
   1021		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
   1022		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
   1023
   1024		/* take all subblocks out of reset, except VCPU */
   1025		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
   1026				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1027		mdelay(5);
   1028
   1029		/* enable VCPU clock */
   1030		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
   1031				UVD_VCPU_CNTL__CLK_EN_MASK);
   1032
   1033		/* enable UMC */
   1034		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
   1035				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
   1036
   1037		/* boot up the VCPU */
   1038		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
   1039		mdelay(10);
   1040
   1041		for (i = 0; i < 10; ++i) {
   1042			uint32_t status;
   1043
   1044			for (j = 0; j < 100; ++j) {
   1045				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
   1046				if (status & 2)
   1047					break;
   1048				mdelay(10);
   1049			}
   1050			r = 0;
   1051			if (status & 2)
   1052				break;
   1053
   1054			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
   1055			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
   1056					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
   1057					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1058			mdelay(10);
   1059			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
   1060					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1061			mdelay(10);
   1062			r = -1;
   1063		}
   1064
   1065		if (r) {
   1066			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
   1067			return r;
   1068		}
   1069		/* enable master interrupt */
   1070		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
   1071			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
   1072			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
   1073
   1074		/* clear the bit 4 of UVD_STATUS */
   1075		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
   1076				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
   1077
   1078		/* force RBC into idle state */
   1079		rb_bufsz = order_base_2(ring->ring_size);
   1080		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
   1081		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
   1082		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
   1083		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
   1084		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
   1085		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
   1086		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
   1087
   1088		/* set the write pointer delay */
   1089		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
   1090
   1091		/* set the wb address */
   1092		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
   1093				(upper_32_bits(ring->gpu_addr) >> 2));
   1094
   1095		/* program the RB_BASE for ring buffer */
   1096		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
   1097				lower_32_bits(ring->gpu_addr));
   1098		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
   1099				upper_32_bits(ring->gpu_addr));
   1100
   1101		/* Initialize the ring buffer's read and write pointers */
   1102		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
   1103
   1104		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
   1105		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
   1106				lower_32_bits(ring->wptr));
   1107
   1108		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
   1109				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
   1110
   1111		ring = &adev->uvd.inst[k].ring_enc[0];
   1112		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
   1113		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
   1114		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
   1115		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
   1116		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
   1117
   1118		ring = &adev->uvd.inst[k].ring_enc[1];
   1119		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
   1120		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
   1121		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
   1122		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
   1123		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
   1124	}
   1125	return 0;
   1126}
   1127
   1128/**
   1129 * uvd_v7_0_stop - stop UVD block
   1130 *
   1131 * @adev: amdgpu_device pointer
   1132 *
   1133 * stop the UVD block
   1134 */
   1135static void uvd_v7_0_stop(struct amdgpu_device *adev)
   1136{
   1137	uint8_t i = 0;
   1138
   1139	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
   1140		if (adev->uvd.harvest_config & (1 << i))
   1141			continue;
   1142		/* force RBC into idle state */
   1143		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
   1144
   1145		/* Stall UMC and register bus before resetting VCPU */
   1146		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
   1147				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
   1148				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
   1149		mdelay(1);
   1150
   1151		/* put VCPU into reset */
   1152		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
   1153				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
   1154		mdelay(5);
   1155
   1156		/* disable VCPU clock */
   1157		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
   1158
   1159		/* Unstall UMC and register bus */
   1160		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
   1161				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
   1162	}
   1163}
   1164
   1165/**
    1166 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
   1167 *
   1168 * @ring: amdgpu_ring pointer
   1169 * @addr: address
   1170 * @seq: sequence number
   1171 * @flags: fence related flags
   1172 *
   1173 * Write a fence and a trap command to the ring.
   1174 */
   1175static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
   1176				     unsigned flags)
   1177{
   1178	struct amdgpu_device *adev = ring->adev;
   1179
   1180	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
   1181
   1182	amdgpu_ring_write(ring,
   1183		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
   1184	amdgpu_ring_write(ring, seq);
   1185	amdgpu_ring_write(ring,
   1186		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
   1187	amdgpu_ring_write(ring, addr & 0xffffffff);
   1188	amdgpu_ring_write(ring,
   1189		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
   1190	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
   1191	amdgpu_ring_write(ring,
   1192		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
   1193	amdgpu_ring_write(ring, 0);
   1194
   1195	amdgpu_ring_write(ring,
   1196		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
   1197	amdgpu_ring_write(ring, 0);
   1198	amdgpu_ring_write(ring,
   1199		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
   1200	amdgpu_ring_write(ring, 0);
   1201	amdgpu_ring_write(ring,
   1202		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
   1203	amdgpu_ring_write(ring, 2);
   1204}
   1205
   1206/**
   1207 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
   1208 *
   1209 * @ring: amdgpu_ring pointer
   1210 * @addr: address
   1211 * @seq: sequence number
   1212 * @flags: fence related flags
   1213 *
    1214 * Write an enc fence and a trap command to the ring.
   1215 */
   1216static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
   1217			u64 seq, unsigned flags)
   1218{
   1219
   1220	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
   1221
   1222	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
   1223	amdgpu_ring_write(ring, addr);
   1224	amdgpu_ring_write(ring, upper_32_bits(addr));
   1225	amdgpu_ring_write(ring, seq);
   1226	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
   1227}
   1228
   1229/**
   1230 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
   1231 *
   1232 * @ring: amdgpu_ring pointer
   1233 */
   1234static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
   1235{
   1236	/* The firmware doesn't seem to like touching registers at this point. */
   1237}
   1238
   1239/**
   1240 * uvd_v7_0_ring_test_ring - register write test
   1241 *
   1242 * @ring: amdgpu_ring pointer
   1243 *
   1244 * Test if we can successfully write to the context register
   1245 */
   1246static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
   1247{
   1248	struct amdgpu_device *adev = ring->adev;
   1249	uint32_t tmp = 0;
   1250	unsigned i;
   1251	int r;
   1252
   1253	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
   1254	r = amdgpu_ring_alloc(ring, 3);
   1255	if (r)
   1256		return r;
   1257
   1258	amdgpu_ring_write(ring,
   1259		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
   1260	amdgpu_ring_write(ring, 0xDEADBEEF);
   1261	amdgpu_ring_commit(ring);
   1262	for (i = 0; i < adev->usec_timeout; i++) {
   1263		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
   1264		if (tmp == 0xDEADBEEF)
   1265			break;
   1266		udelay(1);
   1267	}
   1268
   1269	if (i >= adev->usec_timeout)
   1270		r = -ETIMEDOUT;
   1271
   1272	return r;
   1273}
   1274
   1275/**
   1276 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
   1277 *
   1278 * @p: the CS parser with the IBs
   1279 * @job: which job this ib is in
   1280 * @ib: which IB to patch
   1281 *
   1282 */
   1283static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
   1284					   struct amdgpu_job *job,
   1285					   struct amdgpu_ib *ib)
   1286{
   1287	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
   1288	unsigned i;
   1289
   1290	/* No patching necessary for the first instance */
   1291	if (!ring->me)
   1292		return 0;
   1293
   1294	for (i = 0; i < ib->length_dw; i += 2) {
   1295		uint32_t reg = amdgpu_ib_get_value(ib, i);
   1296
   1297		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
   1298		reg += p->adev->reg_offset[UVD_HWIP][1][1];
   1299
   1300		amdgpu_ib_set_value(ib, i, reg);
   1301	}
   1302	return 0;
   1303}
   1304
   1305/**
   1306 * uvd_v7_0_ring_emit_ib - execute indirect buffer
   1307 *
   1308 * @ring: amdgpu_ring pointer
   1309 * @job: job to retrieve vmid from
   1310 * @ib: indirect buffer to execute
   1311 * @flags: unused
   1312 *
   1313 * Write ring commands to execute the indirect buffer
   1314 */
   1315static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
   1316				  struct amdgpu_job *job,
   1317				  struct amdgpu_ib *ib,
   1318				  uint32_t flags)
   1319{
   1320	struct amdgpu_device *adev = ring->adev;
   1321	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
   1322
   1323	amdgpu_ring_write(ring,
   1324		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
   1325	amdgpu_ring_write(ring, vmid);
   1326
   1327	amdgpu_ring_write(ring,
   1328		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
   1329	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
   1330	amdgpu_ring_write(ring,
   1331		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
   1332	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
   1333	amdgpu_ring_write(ring,
   1334		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
   1335	amdgpu_ring_write(ring, ib->length_dw);
   1336}
   1337
   1338/**
   1339 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
   1340 *
   1341 * @ring: amdgpu_ring pointer
    1342 * @job: job to retrieve vmid from
   1343 * @ib: indirect buffer to execute
   1344 * @flags: unused
   1345 *
   1346 * Write enc ring commands to execute the indirect buffer
   1347 */
   1348static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
   1349					struct amdgpu_job *job,
   1350					struct amdgpu_ib *ib,
   1351					uint32_t flags)
   1352{
   1353	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
   1354
   1355	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
   1356	amdgpu_ring_write(ring, vmid);
   1357	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
   1358	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
   1359	amdgpu_ring_write(ring, ib->length_dw);
   1360}
   1361
   1362static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
   1363				    uint32_t reg, uint32_t val)
   1364{
   1365	struct amdgpu_device *adev = ring->adev;
   1366
   1367	amdgpu_ring_write(ring,
   1368		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
   1369	amdgpu_ring_write(ring, reg << 2);
   1370	amdgpu_ring_write(ring,
   1371		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
   1372	amdgpu_ring_write(ring, val);
   1373	amdgpu_ring_write(ring,
   1374		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
   1375	amdgpu_ring_write(ring, 8);
   1376}
   1377
   1378static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
   1379					uint32_t val, uint32_t mask)
   1380{
   1381	struct amdgpu_device *adev = ring->adev;
   1382
   1383	amdgpu_ring_write(ring,
   1384		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
   1385	amdgpu_ring_write(ring, reg << 2);
   1386	amdgpu_ring_write(ring,
   1387		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
   1388	amdgpu_ring_write(ring, val);
   1389	amdgpu_ring_write(ring,
   1390		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
   1391	amdgpu_ring_write(ring, mask);
   1392	amdgpu_ring_write(ring,
   1393		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
   1394	amdgpu_ring_write(ring, 12);
   1395}
   1396
   1397static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
   1398					unsigned vmid, uint64_t pd_addr)
   1399{
   1400	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
   1401	uint32_t data0, data1, mask;
   1402
   1403	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
   1404
   1405	/* wait for reg writes */
   1406	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
   1407	data1 = lower_32_bits(pd_addr);
   1408	mask = 0xffffffff;
   1409	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
   1410}
   1411
   1412static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
   1413{
   1414	struct amdgpu_device *adev = ring->adev;
   1415	int i;
   1416
   1417	WARN_ON(ring->wptr % 2 || count % 2);
   1418
   1419	for (i = 0; i < count / 2; i++) {
   1420		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
   1421		amdgpu_ring_write(ring, 0);
   1422	}
   1423}
   1424
   1425static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
   1426{
   1427	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
   1428}
   1429
   1430static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
   1431					    uint32_t reg, uint32_t val,
   1432					    uint32_t mask)
   1433{
   1434	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
   1435	amdgpu_ring_write(ring,	reg << 2);
   1436	amdgpu_ring_write(ring, mask);
   1437	amdgpu_ring_write(ring, val);
   1438}
   1439
   1440static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
   1441					    unsigned int vmid, uint64_t pd_addr)
   1442{
   1443	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
   1444
   1445	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
   1446
   1447	/* wait for reg writes */
   1448	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
   1449					vmid * hub->ctx_addr_distance,
   1450					lower_32_bits(pd_addr), 0xffffffff);
   1451}
   1452
   1453static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
   1454					uint32_t reg, uint32_t val)
   1455{
   1456	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
   1457	amdgpu_ring_write(ring,	reg << 2);
   1458	amdgpu_ring_write(ring, val);
   1459}
   1460
   1461#if 0
   1462static bool uvd_v7_0_is_idle(void *handle)
   1463{
   1464	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1465
   1466	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
   1467}
   1468
   1469static int uvd_v7_0_wait_for_idle(void *handle)
   1470{
   1471	unsigned i;
   1472	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1473
   1474	for (i = 0; i < adev->usec_timeout; i++) {
   1475		if (uvd_v7_0_is_idle(handle))
   1476			return 0;
   1477	}
   1478	return -ETIMEDOUT;
   1479}
   1480
   1481#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
   1482static bool uvd_v7_0_check_soft_reset(void *handle)
   1483{
   1484	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1485	u32 srbm_soft_reset = 0;
   1486	u32 tmp = RREG32(mmSRBM_STATUS);
   1487
   1488	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
   1489	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
   1490	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
   1491		    AMDGPU_UVD_STATUS_BUSY_MASK))
   1492		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
   1493				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
   1494
   1495	if (srbm_soft_reset) {
   1496		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
   1497		return true;
   1498	} else {
   1499		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
   1500		return false;
   1501	}
   1502}
   1503
   1504static int uvd_v7_0_pre_soft_reset(void *handle)
   1505{
   1506	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1507
   1508	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
   1509		return 0;
   1510
   1511	uvd_v7_0_stop(adev);
   1512	return 0;
   1513}
   1514
   1515static int uvd_v7_0_soft_reset(void *handle)
   1516{
   1517	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1518	u32 srbm_soft_reset;
   1519
   1520	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
   1521		return 0;
   1522	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
   1523
   1524	if (srbm_soft_reset) {
   1525		u32 tmp;
   1526
   1527		tmp = RREG32(mmSRBM_SOFT_RESET);
   1528		tmp |= srbm_soft_reset;
   1529		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
   1530		WREG32(mmSRBM_SOFT_RESET, tmp);
   1531		tmp = RREG32(mmSRBM_SOFT_RESET);
   1532
   1533		udelay(50);
   1534
   1535		tmp &= ~srbm_soft_reset;
   1536		WREG32(mmSRBM_SOFT_RESET, tmp);
   1537		tmp = RREG32(mmSRBM_SOFT_RESET);
   1538
   1539		/* Wait a little for things to settle down */
   1540		udelay(50);
   1541	}
   1542
   1543	return 0;
   1544}
   1545
   1546static int uvd_v7_0_post_soft_reset(void *handle)
   1547{
   1548	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1549
   1550	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
   1551		return 0;
   1552
   1553	mdelay(5);
   1554
   1555	return uvd_v7_0_start(adev);
   1556}
   1557#endif
   1558
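        /**
         * uvd_v7_0_set_interrupt_state - toggle interrupt delivery
         *
         * @adev: amdgpu_device pointer
         * @source: interrupt source
         * @type: interrupt type
         * @state: requested interrupt state
         *
         * Not implemented for UVD 7.0; the callback simply reports success.
         */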
   1559static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
   1560					struct amdgpu_irq_src *source,
   1561					unsigned type,
   1562					enum amdgpu_interrupt_state state)
   1563{
    1564	/* TODO: interrupt state toggling is not implemented for UVD 7.0 */
   1565	return 0;
   1566}
   1567
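        /**
         * uvd_v7_0_process_interrupt - process a UVD interrupt
         *
         * @adev: amdgpu_device pointer
         * @source: interrupt source
         * @entry: interrupt vector entry
         *
         * Map the client id to a UVD instance and signal fence completion on
         * the ring that raised the interrupt: src_id 124 is the decode ring,
         * 119 and 120 are encode rings 0 and 1.  Encode ring 1 interrupts are
         * ignored when running as an SR-IOV VF.
         */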
   1568static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
   1569				      struct amdgpu_irq_src *source,
   1570				      struct amdgpu_iv_entry *entry)
   1571{
   1572	uint32_t ip_instance;
   1573
   1574	switch (entry->client_id) {
   1575	case SOC15_IH_CLIENTID_UVD:
   1576		ip_instance = 0;
   1577		break;
   1578	case SOC15_IH_CLIENTID_UVD1:
   1579		ip_instance = 1;
   1580		break;
   1581	default:
   1582		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
   1583		return 0;
   1584	}
   1585
   1586	DRM_DEBUG("IH: UVD TRAP\n");
   1587
   1588	switch (entry->src_id) {
   1589	case 124:
   1590		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
   1591		break;
   1592	case 119:
   1593		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
   1594		break;
   1595	case 120:
   1596		if (!amdgpu_sriov_vf(adev))
   1597			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
   1598		break;
   1599	default:
   1600		DRM_ERROR("Unhandled interrupt: %d %d\n",
   1601			  entry->src_id, entry->src_data[0]);
   1602		break;
   1603	}
   1604
   1605	return 0;
   1606}
   1607
   1608#if 0
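        /*
         * The clock/power gating helpers below are likewise compiled out;
         * several of them also use ring->me without a ring in scope.
         */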
   1609static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
   1610{
   1611	uint32_t data, data1, data2, suvd_flags;
   1612
   1613	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
   1614	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
   1615	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
   1616
   1617	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
   1618		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
   1619
   1620	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
   1621		     UVD_SUVD_CGC_GATE__SIT_MASK |
   1622		     UVD_SUVD_CGC_GATE__SMP_MASK |
   1623		     UVD_SUVD_CGC_GATE__SCM_MASK |
   1624		     UVD_SUVD_CGC_GATE__SDB_MASK;
   1625
   1626	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
   1627		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
   1628		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
   1629
   1630	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
   1631			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
   1632			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
   1633			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
   1634			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
   1635			UVD_CGC_CTRL__SYS_MODE_MASK |
   1636			UVD_CGC_CTRL__UDEC_MODE_MASK |
   1637			UVD_CGC_CTRL__MPEG2_MODE_MASK |
   1638			UVD_CGC_CTRL__REGS_MODE_MASK |
   1639			UVD_CGC_CTRL__RBC_MODE_MASK |
   1640			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
   1641			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
   1642			UVD_CGC_CTRL__IDCT_MODE_MASK |
   1643			UVD_CGC_CTRL__MPRD_MODE_MASK |
   1644			UVD_CGC_CTRL__MPC_MODE_MASK |
   1645			UVD_CGC_CTRL__LBSI_MODE_MASK |
   1646			UVD_CGC_CTRL__LRBBM_MODE_MASK |
   1647			UVD_CGC_CTRL__WCB_MODE_MASK |
   1648			UVD_CGC_CTRL__VCPU_MODE_MASK |
   1649			UVD_CGC_CTRL__JPEG_MODE_MASK |
   1650			UVD_CGC_CTRL__JPEG2_MODE_MASK |
   1651			UVD_CGC_CTRL__SCPU_MODE_MASK);
   1652	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
   1653			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
   1654			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
   1655			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
   1656			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
   1657	data1 |= suvd_flags;
   1658
   1659	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
   1660	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
   1661	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
   1662	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
   1663}
   1664
   1665static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
   1666{
   1667	uint32_t data, data1, cgc_flags, suvd_flags;
   1668
   1669	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
   1670	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
   1671
   1672	cgc_flags = UVD_CGC_GATE__SYS_MASK |
   1673		UVD_CGC_GATE__UDEC_MASK |
   1674		UVD_CGC_GATE__MPEG2_MASK |
   1675		UVD_CGC_GATE__RBC_MASK |
   1676		UVD_CGC_GATE__LMI_MC_MASK |
   1677		UVD_CGC_GATE__IDCT_MASK |
   1678		UVD_CGC_GATE__MPRD_MASK |
   1679		UVD_CGC_GATE__MPC_MASK |
   1680		UVD_CGC_GATE__LBSI_MASK |
   1681		UVD_CGC_GATE__LRBBM_MASK |
   1682		UVD_CGC_GATE__UDEC_RE_MASK |
   1683		UVD_CGC_GATE__UDEC_CM_MASK |
   1684		UVD_CGC_GATE__UDEC_IT_MASK |
   1685		UVD_CGC_GATE__UDEC_DB_MASK |
   1686		UVD_CGC_GATE__UDEC_MP_MASK |
   1687		UVD_CGC_GATE__WCB_MASK |
   1688		UVD_CGC_GATE__VCPU_MASK |
   1689		UVD_CGC_GATE__SCPU_MASK |
   1690		UVD_CGC_GATE__JPEG_MASK |
   1691		UVD_CGC_GATE__JPEG2_MASK;
   1692
   1693	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
   1694				UVD_SUVD_CGC_GATE__SIT_MASK |
   1695				UVD_SUVD_CGC_GATE__SMP_MASK |
   1696				UVD_SUVD_CGC_GATE__SCM_MASK |
   1697				UVD_SUVD_CGC_GATE__SDB_MASK;
   1698
   1699	data |= cgc_flags;
   1700	data1 |= suvd_flags;
   1701
   1702	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
   1703	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
   1704}
   1705
   1706static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
   1707{
   1708	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
   1709
   1710	if (enable)
   1711		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
   1712			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
   1713	else
   1714		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
   1715			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
   1716
   1717	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
   1718}
   1719
   1720
   1721static int uvd_v7_0_set_clockgating_state(void *handle,
   1722					  enum amd_clockgating_state state)
   1723{
   1724	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1725	bool enable = (state == AMD_CG_STATE_GATE);
   1726
   1727	uvd_v7_0_set_bypass_mode(adev, enable);
   1728
   1729	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
   1730		return 0;
   1731
   1732	if (enable) {
    1733		/* disable HW gating and enable SW gating */
   1734		uvd_v7_0_set_sw_clock_gating(adev);
   1735	} else {
   1736		/* wait for STATUS to clear */
   1737		if (uvd_v7_0_wait_for_idle(handle))
   1738			return -EBUSY;
   1739
   1740		/* enable HW gates because UVD is idle */
   1741		/* uvd_v7_0_set_hw_clock_gating(adev); */
   1742	}
   1743
   1744	return 0;
   1745}
   1746
   1747static int uvd_v7_0_set_powergating_state(void *handle,
   1748					  enum amd_powergating_state state)
   1749{
   1750	/* This doesn't actually powergate the UVD block.
   1751	 * That's done in the dpm code via the SMC.  This
   1752	 * just re-inits the block as necessary.  The actual
   1753	 * gating still happens in the dpm code.  We should
   1754	 * revisit this when there is a cleaner line between
   1755	 * the smc and the hw blocks
   1756	 */
   1757	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
   1758
   1759	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
   1760		return 0;
   1761
   1762	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
   1763
   1764	if (state == AMD_PG_STATE_GATE) {
   1765		uvd_v7_0_stop(adev);
   1766		return 0;
   1767	} else {
   1768		return uvd_v7_0_start(adev);
   1769	}
   1770}
   1771#endif
   1772
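        /**
         * uvd_v7_0_set_clockgating_state - set UVD clockgating state
         *
         * @handle: amdgpu_device pointer
         * @state: requested clockgating state
         *
         * Stub that always reports success; it only exists so that
         * clockgating requests issued during driver unload do not fail.
         */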
   1773static int uvd_v7_0_set_clockgating_state(void *handle,
   1774					  enum amd_clockgating_state state)
   1775{
    1776	/* needed for driver unload */
   1777	return 0;
   1778}
   1779
   1780const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
   1781	.name = "uvd_v7_0",
   1782	.early_init = uvd_v7_0_early_init,
   1783	.late_init = NULL,
   1784	.sw_init = uvd_v7_0_sw_init,
   1785	.sw_fini = uvd_v7_0_sw_fini,
   1786	.hw_init = uvd_v7_0_hw_init,
   1787	.hw_fini = uvd_v7_0_hw_fini,
   1788	.suspend = uvd_v7_0_suspend,
   1789	.resume = uvd_v7_0_resume,
   1790	.is_idle = NULL /* uvd_v7_0_is_idle */,
   1791	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
   1792	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
   1793	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
   1794	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
   1795	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
   1796	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
   1797	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
   1798};
   1799
   1800static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
   1801	.type = AMDGPU_RING_TYPE_UVD,
   1802	.align_mask = 0xf,
   1803	.support_64bit_ptrs = false,
   1804	.no_user_fence = true,
   1805	.vmhub = AMDGPU_MMHUB_0,
   1806	.get_rptr = uvd_v7_0_ring_get_rptr,
   1807	.get_wptr = uvd_v7_0_ring_get_wptr,
   1808	.set_wptr = uvd_v7_0_ring_set_wptr,
   1809	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
   1810	.emit_frame_size =
   1811		6 + /* hdp invalidate */
   1812		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
   1813		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
   1814		8 + /* uvd_v7_0_ring_emit_vm_flush */
   1815		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
   1816	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
   1817	.emit_ib = uvd_v7_0_ring_emit_ib,
   1818	.emit_fence = uvd_v7_0_ring_emit_fence,
   1819	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
   1820	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
   1821	.test_ring = uvd_v7_0_ring_test_ring,
   1822	.test_ib = amdgpu_uvd_ring_test_ib,
   1823	.insert_nop = uvd_v7_0_ring_insert_nop,
   1824	.pad_ib = amdgpu_ring_generic_pad_ib,
   1825	.begin_use = amdgpu_uvd_ring_begin_use,
   1826	.end_use = amdgpu_uvd_ring_end_use,
   1827	.emit_wreg = uvd_v7_0_ring_emit_wreg,
   1828	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
   1829	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
   1830};
   1831
   1832static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
   1833	.type = AMDGPU_RING_TYPE_UVD_ENC,
   1834	.align_mask = 0x3f,
   1835	.nop = HEVC_ENC_CMD_NO_OP,
   1836	.support_64bit_ptrs = false,
   1837	.no_user_fence = true,
   1838	.vmhub = AMDGPU_MMHUB_0,
   1839	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
   1840	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
   1841	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
   1842	.emit_frame_size =
   1843		3 + 3 + /* hdp flush / invalidate */
   1844		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
   1845		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
   1846		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
   1847		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
   1848		1, /* uvd_v7_0_enc_ring_insert_end */
   1849	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
   1850	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
   1851	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
   1852	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
   1853	.test_ring = uvd_v7_0_enc_ring_test_ring,
   1854	.test_ib = uvd_v7_0_enc_ring_test_ib,
   1855	.insert_nop = amdgpu_ring_insert_nop,
   1856	.insert_end = uvd_v7_0_enc_ring_insert_end,
   1857	.pad_ib = amdgpu_ring_generic_pad_ib,
   1858	.begin_use = amdgpu_uvd_ring_begin_use,
   1859	.end_use = amdgpu_uvd_ring_end_use,
   1860	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
   1861	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
   1862	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
   1863};
   1864
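        /**
         * uvd_v7_0_set_ring_funcs - set decode ring functions
         *
         * @adev: amdgpu_device pointer
         *
         * Hook up the VM-mode decode ring callbacks for every UVD instance
         * that has not been harvested.
         */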
   1865static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
   1866{
   1867	int i;
   1868
   1869	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
   1870		if (adev->uvd.harvest_config & (1 << i))
   1871			continue;
   1872		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
   1873		adev->uvd.inst[i].ring.me = i;
   1874		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
   1875	}
   1876}
   1877
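        /**
         * uvd_v7_0_set_enc_ring_funcs - set encode ring functions
         *
         * @adev: amdgpu_device pointer
         *
         * Hook up the VM-mode encode ring callbacks for every encode ring of
         * every UVD instance that has not been harvested.
         */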
   1878static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
   1879{
   1880	int i, j;
   1881
   1882	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
   1883		if (adev->uvd.harvest_config & (1 << j))
   1884			continue;
   1885		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
   1886			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
   1887			adev->uvd.inst[j].ring_enc[i].me = j;
   1888		}
   1889
   1890		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
   1891	}
   1892}
   1893
   1894static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
   1895	.set = uvd_v7_0_set_interrupt_state,
   1896	.process = uvd_v7_0_process_interrupt,
   1897};
   1898
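        /**
         * uvd_v7_0_set_irq_funcs - set interrupt functions
         *
         * @adev: amdgpu_device pointer
         *
         * Register the interrupt callbacks and the number of interrupt types
         * (one per encode ring plus one for the decode ring) for each UVD
         * instance that has not been harvested.
         */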
   1899static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
   1900{
   1901	int i;
   1902
   1903	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
   1904		if (adev->uvd.harvest_config & (1 << i))
   1905			continue;
   1906		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
   1907		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
   1908	}
   1909}
   1910
   1911const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
   1912{
   1913		.type = AMD_IP_BLOCK_TYPE_UVD,
   1914		.major = 7,
   1915		.minor = 0,
   1916		.rev = 0,
   1917		.funcs = &uvd_v7_0_ip_funcs,
   1918};