cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

jpeg_v2_5.c (22118B)


      1/*
      2 * Copyright 2019 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include "amdgpu.h"
     25#include "amdgpu_jpeg.h"
     26#include "soc15.h"
     27#include "soc15d.h"
     28#include "jpeg_v2_0.h"
     29#include "jpeg_v2_5.h"
     30
     31#include "vcn/vcn_2_5_offset.h"
     32#include "vcn/vcn_2_5_sh_mask.h"
     33#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
     34
     35#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f
     36
     37#define JPEG25_MAX_HW_INSTANCES_ARCTURUS			2
     38
     39static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
     40static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
     41static int jpeg_v2_5_set_powergating_state(void *handle,
     42				enum amd_powergating_state state);
     43static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
     44
     45static int amdgpu_ih_clientid_jpeg[] = {
     46	SOC15_IH_CLIENTID_VCN,
     47	SOC15_IH_CLIENTID_VCN1
     48};
     49
     50/**
     51 * jpeg_v2_5_early_init - set function pointers
     52 *
     53 * @handle: amdgpu_device pointer
     54 *
     55 * Set ring and irq function pointers
     56 */
     57static int jpeg_v2_5_early_init(void *handle)
     58{
     59	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     60	u32 harvest;
     61	int i;
     62
     63	adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
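	/*
	 * Record fused-off (harvested) instances in a bitmask so that every
	 * per-instance loop below can skip them.
	 */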
     64	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
     65		harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
     66		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
     67			adev->jpeg.harvest_config |= 1 << i;
     68	}
     69	if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
     70					 AMDGPU_JPEG_HARVEST_JPEG1))
     71		return -ENOENT;
     72
     73	jpeg_v2_5_set_dec_ring_funcs(adev);
     74	jpeg_v2_5_set_irq_funcs(adev);
     75	jpeg_v2_5_set_ras_funcs(adev);
     76
     77	return 0;
     78}
     79
     80/**
     81 * jpeg_v2_5_sw_init - sw init for JPEG block
     82 *
     83 * @handle: amdgpu_device pointer
     84 *
      85 * Load firmware and do software initialization
     86 */
     87static int jpeg_v2_5_sw_init(void *handle)
     88{
     89	struct amdgpu_ring *ring;
     90	int i, r;
     91	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     92
     93	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
     94		if (adev->jpeg.harvest_config & (1 << i))
     95			continue;
     96
     97		/* JPEG TRAP */
     98		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
     99				VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
    100		if (r)
    101			return r;
    102
    103		/* JPEG DJPEG POISON EVENT */
    104		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
    105			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
    106		if (r)
    107			return r;
    108
    109		/* JPEG EJPEG POISON EVENT */
    110		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
    111			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
    112		if (r)
    113			return r;
    114	}
    115
    116	r = amdgpu_jpeg_sw_init(adev);
    117	if (r)
    118		return r;
    119
    120	r = amdgpu_jpeg_resume(adev);
    121	if (r)
    122		return r;
    123
    124	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    125		if (adev->jpeg.harvest_config & (1 << i))
    126			continue;
    127
    128		ring = &adev->jpeg.inst[i].ring_dec;
    129		ring->use_doorbell = true;
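		/*
		 * Doorbell layout (as used elsewhere in amdgpu): the VCN/JPEG
		 * doorbell base is kept in 64-bit units, so it is doubled to
		 * get a 32-bit slot; the JPEG decode ring of instance i then
		 * takes slot 1 of the 8-slot block reserved for that instance.
		 */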
    130		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
    131		sprintf(ring->name, "jpeg_dec_%d", i);
    132		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
    133				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
    134		if (r)
    135			return r;
    136
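		/*
		 * The generic JPEG ring/IB tests poke a known value into
		 * UVD_JPEG_PITCH through the ring (internal offset) and read
		 * it back over MMIO (external, per-instance offset).
		 */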
    137		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
    138		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
    139	}
    140
    141	return 0;
    142}
    143
    144/**
    145 * jpeg_v2_5_sw_fini - sw fini for JPEG block
    146 *
    147 * @handle: amdgpu_device pointer
    148 *
     149 * Suspend the JPEG block and free up software allocations
    150 */
    151static int jpeg_v2_5_sw_fini(void *handle)
    152{
    153	int r;
    154	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    155
    156	r = amdgpu_jpeg_suspend(adev);
    157	if (r)
    158		return r;
    159
    160	r = amdgpu_jpeg_sw_fini(adev);
    161
    162	return r;
    163}
    164
    165/**
    166 * jpeg_v2_5_hw_init - start and test JPEG block
    167 *
    168 * @handle: amdgpu_device pointer
    169 *
    170 */
    171static int jpeg_v2_5_hw_init(void *handle)
    172{
    173	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    174	struct amdgpu_ring *ring;
    175	int i, r;
    176
    177	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    178		if (adev->jpeg.harvest_config & (1 << i))
    179			continue;
    180
    181		ring = &adev->jpeg.inst[i].ring_dec;
    182		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
    183			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
    184
    185		r = amdgpu_ring_test_helper(ring);
    186		if (r)
    187			return r;
    188	}
    189
    190	DRM_INFO("JPEG decode initialized successfully.\n");
    191
    192	return 0;
    193}
    194
    195/**
    196 * jpeg_v2_5_hw_fini - stop the hardware block
    197 *
    198 * @handle: amdgpu_device pointer
    199 *
     200 * Stop the JPEG block and mark the rings as no longer ready
    201 */
    202static int jpeg_v2_5_hw_fini(void *handle)
    203{
    204	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    205	int i;
    206
    207	cancel_delayed_work_sync(&adev->vcn.idle_work);
    208
    209	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    210		if (adev->jpeg.harvest_config & (1 << i))
    211			continue;
    212
    213		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
    214		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
    215			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
    216	}
    217
    218	return 0;
    219}
    220
    221/**
    222 * jpeg_v2_5_suspend - suspend JPEG block
    223 *
    224 * @handle: amdgpu_device pointer
    225 *
    226 * HW fini and suspend JPEG block
    227 */
    228static int jpeg_v2_5_suspend(void *handle)
    229{
    230	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    231	int r;
    232
    233	r = jpeg_v2_5_hw_fini(adev);
    234	if (r)
    235		return r;
    236
    237	r = amdgpu_jpeg_suspend(adev);
    238
    239	return r;
    240}
    241
    242/**
    243 * jpeg_v2_5_resume - resume JPEG block
    244 *
    245 * @handle: amdgpu_device pointer
    246 *
    247 * Resume firmware and hw init JPEG block
    248 */
    249static int jpeg_v2_5_resume(void *handle)
    250{
    251	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    252	int r;
    253
    254	r = amdgpu_jpeg_resume(adev);
    255	if (r)
    256		return r;
    257
    258	r = jpeg_v2_5_hw_init(adev);
    259
    260	return r;
    261}
    262
    263static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
    264{
    265	uint32_t data;
    266
    267	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
    268	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
    269		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
    270	else
     271		data &= ~(1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT);
    272
    273	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
    274	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
    275	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
    276
    277	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
    278	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
    279		| JPEG_CGC_GATE__JPEG2_DEC_MASK
    280		| JPEG_CGC_GATE__JMCIF_MASK
    281		| JPEG_CGC_GATE__JRBBM_MASK);
    282	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
    283
    284	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
    285	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
    286		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
    287		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
    288		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
    289	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
    290}
    291
    292static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
    293{
    294	uint32_t data;
    295
    296	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
    297	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
    298		|JPEG_CGC_GATE__JPEG2_DEC_MASK
    299		|JPEG_CGC_GATE__JPEG_ENC_MASK
    300		|JPEG_CGC_GATE__JMCIF_MASK
    301		|JPEG_CGC_GATE__JRBBM_MASK);
    302	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
    303}
    304
    305/**
    306 * jpeg_v2_5_start - start JPEG block
    307 *
    308 * @adev: amdgpu_device pointer
    309 *
    310 * Setup and start the JPEG block
    311 */
    312static int jpeg_v2_5_start(struct amdgpu_device *adev)
    313{
    314	struct amdgpu_ring *ring;
    315	int i;
    316
    317	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    318		if (adev->jpeg.harvest_config & (1 << i))
    319			continue;
    320
    321		ring = &adev->jpeg.inst[i].ring_dec;
    322		/* disable anti hang mechanism */
    323		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
    324			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
    325
    326		/* JPEG disable CGC */
    327		jpeg_v2_5_disable_clock_gating(adev, i);
    328
    329		/* MJPEG global tiling registers */
    330		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
    331			adev->gfx.config.gb_addr_config);
    332		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
    333			adev->gfx.config.gb_addr_config);
    334
    335		/* enable JMI channel */
    336		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
    337			~UVD_JMI_CNTL__SOFT_RESET_MASK);
    338
    339		/* enable System Interrupt for JRBC */
    340		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
    341			JPEG_SYS_INT_EN__DJRBC_MASK,
    342			~JPEG_SYS_INT_EN__DJRBC_MASK);
    343
    344		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
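		/*
		 * Hold the ring while it is programmed: bit 0 stops command
		 * fetch (RB_NO_FETCH) and bit 1 lets the driver write RPTR
		 * (RB_RPTR_WR_EN); fetch is released again below once the
		 * ring base, pointers and size have been set.
		 */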
    345		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
    346		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
    347			lower_32_bits(ring->gpu_addr));
    348		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
    349			upper_32_bits(ring->gpu_addr));
    350		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
    351		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
    352		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
    353		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
    354		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
    355	}
    356
    357	return 0;
    358}
    359
    360/**
    361 * jpeg_v2_5_stop - stop JPEG block
    362 *
    363 * @adev: amdgpu_device pointer
    364 *
     365 * Stop the JPEG block
    366 */
    367static int jpeg_v2_5_stop(struct amdgpu_device *adev)
    368{
    369	int i;
    370
    371	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    372		if (adev->jpeg.harvest_config & (1 << i))
    373			continue;
    374
    375		/* reset JMI */
    376		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
    377			UVD_JMI_CNTL__SOFT_RESET_MASK,
    378			~UVD_JMI_CNTL__SOFT_RESET_MASK);
    379
    380		jpeg_v2_5_enable_clock_gating(adev, i);
    381
    382		/* enable anti hang mechanism */
    383		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
    384			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
    385			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
    386	}
    387
    388	return 0;
    389}
    390
    391/**
    392 * jpeg_v2_5_dec_ring_get_rptr - get read pointer
    393 *
    394 * @ring: amdgpu_ring pointer
    395 *
    396 * Returns the current hardware read pointer
    397 */
    398static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
    399{
    400	struct amdgpu_device *adev = ring->adev;
    401
    402	return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
    403}
    404
    405/**
    406 * jpeg_v2_5_dec_ring_get_wptr - get write pointer
    407 *
    408 * @ring: amdgpu_ring pointer
    409 *
    410 * Returns the current hardware write pointer
    411 */
    412static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
    413{
    414	struct amdgpu_device *adev = ring->adev;
    415
    416	if (ring->use_doorbell)
    417		return *ring->wptr_cpu_addr;
    418	else
    419		return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
    420}
    421
    422/**
    423 * jpeg_v2_5_dec_ring_set_wptr - set write pointer
    424 *
    425 * @ring: amdgpu_ring pointer
    426 *
    427 * Commits the write pointer to the hardware
    428 */
    429static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
    430{
    431	struct amdgpu_device *adev = ring->adev;
    432
    433	if (ring->use_doorbell) {
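		/*
		 * Mirror the new write pointer in the CPU-side copy, then
		 * ring the doorbell so the engine picks it up without an
		 * MMIO register write.
		 */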
    434		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
    435		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
    436	} else {
    437		WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
    438	}
    439}
    440
    441/**
    442 * jpeg_v2_6_dec_ring_insert_start - insert a start command
    443 *
    444 * @ring: amdgpu_ring pointer
    445 *
    446 * Write a start command to the ring.
    447 */
    448static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring)
    449{
    450	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
    451		0, 0, PACKETJ_TYPE0));
    452	amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
    453
    454	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
    455		0, 0, PACKETJ_TYPE0));
    456	amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14)));
    457}
    458
    459/**
     460 * jpeg_v2_6_dec_ring_insert_end - insert an end command
    461 *
    462 * @ring: amdgpu_ring pointer
    463 *
     464 * Write an end command to the ring.
    465 */
    466static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
    467{
    468	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
    469		0, 0, PACKETJ_TYPE0));
    470	amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
    471
    472	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
    473		0, 0, PACKETJ_TYPE0));
    474	amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
    475}
    476
    477static bool jpeg_v2_5_is_idle(void *handle)
    478{
    479	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    480	int i, ret = 1;
    481
    482	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    483		if (adev->jpeg.harvest_config & (1 << i))
    484			continue;
    485
    486		ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
    487			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
    488			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
    489	}
    490
    491	return ret;
    492}
    493
    494static int jpeg_v2_5_wait_for_idle(void *handle)
    495{
    496	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    497	int i, ret;
    498
    499	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    500		if (adev->jpeg.harvest_config & (1 << i))
    501			continue;
    502
    503		ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
    504			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
    505			UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
    506		if (ret)
    507			return ret;
    508	}
    509
    510	return 0;
    511}
    512
    513static int jpeg_v2_5_set_clockgating_state(void *handle,
    514					  enum amd_clockgating_state state)
    515{
    516	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    517	bool enable = (state == AMD_CG_STATE_GATE);
    518	int i;
    519
    520	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    521		if (adev->jpeg.harvest_config & (1 << i))
    522			continue;
    523
    524		if (enable) {
    525			if (!jpeg_v2_5_is_idle(handle))
    526				return -EBUSY;
    527			jpeg_v2_5_enable_clock_gating(adev, i);
    528		} else {
    529			jpeg_v2_5_disable_clock_gating(adev, i);
    530		}
    531	}
    532
    533	return 0;
    534}
    535
    536static int jpeg_v2_5_set_powergating_state(void *handle,
    537					  enum amd_powergating_state state)
    538{
    539	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    540	int ret;
    541
     542	if (state == adev->jpeg.cur_state)
    543		return 0;
    544
    545	if (state == AMD_PG_STATE_GATE)
    546		ret = jpeg_v2_5_stop(adev);
    547	else
    548		ret = jpeg_v2_5_start(adev);
    549
     550	if (!ret)
    551		adev->jpeg.cur_state = state;
    552
    553	return ret;
    554}
    555
    556static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
    557					struct amdgpu_irq_src *source,
    558					unsigned type,
    559					enum amdgpu_interrupt_state state)
    560{
    561	return 0;
    562}
    563
    564static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
    565				      struct amdgpu_irq_src *source,
    566				      struct amdgpu_iv_entry *entry)
    567{
    568	uint32_t ip_instance;
    569
    570	switch (entry->client_id) {
    571	case SOC15_IH_CLIENTID_VCN:
    572		ip_instance = 0;
    573		break;
    574	case SOC15_IH_CLIENTID_VCN1:
    575		ip_instance = 1;
    576		break;
    577	default:
    578		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
    579		return 0;
    580	}
    581
    582	DRM_DEBUG("IH: JPEG TRAP\n");
    583
    584	switch (entry->src_id) {
    585	case VCN_2_0__SRCID__JPEG_DECODE:
    586		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
    587		break;
    588	case VCN_2_6__SRCID_DJPEG0_POISON:
    589	case VCN_2_6__SRCID_EJPEG0_POISON:
    590		amdgpu_jpeg_process_poison_irq(adev, source, entry);
    591		break;
    592	default:
    593		DRM_ERROR("Unhandled interrupt: %d %d\n",
    594			  entry->src_id, entry->src_data[0]);
    595		break;
    596	}
    597
    598	return 0;
    599}
    600
    601static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
    602	.name = "jpeg_v2_5",
    603	.early_init = jpeg_v2_5_early_init,
    604	.late_init = NULL,
    605	.sw_init = jpeg_v2_5_sw_init,
    606	.sw_fini = jpeg_v2_5_sw_fini,
    607	.hw_init = jpeg_v2_5_hw_init,
    608	.hw_fini = jpeg_v2_5_hw_fini,
    609	.suspend = jpeg_v2_5_suspend,
    610	.resume = jpeg_v2_5_resume,
    611	.is_idle = jpeg_v2_5_is_idle,
    612	.wait_for_idle = jpeg_v2_5_wait_for_idle,
    613	.check_soft_reset = NULL,
    614	.pre_soft_reset = NULL,
    615	.soft_reset = NULL,
    616	.post_soft_reset = NULL,
    617	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
    618	.set_powergating_state = jpeg_v2_5_set_powergating_state,
    619};
    620
    621static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
    622	.name = "jpeg_v2_6",
    623	.early_init = jpeg_v2_5_early_init,
    624	.late_init = NULL,
    625	.sw_init = jpeg_v2_5_sw_init,
    626	.sw_fini = jpeg_v2_5_sw_fini,
    627	.hw_init = jpeg_v2_5_hw_init,
    628	.hw_fini = jpeg_v2_5_hw_fini,
    629	.suspend = jpeg_v2_5_suspend,
    630	.resume = jpeg_v2_5_resume,
    631	.is_idle = jpeg_v2_5_is_idle,
    632	.wait_for_idle = jpeg_v2_5_wait_for_idle,
    633	.check_soft_reset = NULL,
    634	.pre_soft_reset = NULL,
    635	.soft_reset = NULL,
    636	.post_soft_reset = NULL,
    637	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
    638	.set_powergating_state = jpeg_v2_5_set_powergating_state,
    639};
    640
    641static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
    642	.type = AMDGPU_RING_TYPE_VCN_JPEG,
    643	.align_mask = 0xf,
    644	.vmhub = AMDGPU_MMHUB_1,
    645	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
    646	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
    647	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
    648	.emit_frame_size =
    649		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
    650		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
    651		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
    652		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
    653		8 + 16,
    654	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
    655	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
    656	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
    657	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
    658	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
    659	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
    660	.insert_nop = jpeg_v2_0_dec_ring_nop,
    661	.insert_start = jpeg_v2_0_dec_ring_insert_start,
    662	.insert_end = jpeg_v2_0_dec_ring_insert_end,
    663	.pad_ib = amdgpu_ring_generic_pad_ib,
    664	.begin_use = amdgpu_jpeg_ring_begin_use,
    665	.end_use = amdgpu_jpeg_ring_end_use,
    666	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
    667	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
    668	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
    669};
    670
    671static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
    672	.type = AMDGPU_RING_TYPE_VCN_JPEG,
    673	.align_mask = 0xf,
    674	.vmhub = AMDGPU_MMHUB_0,
    675	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
    676	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
    677	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
    678	.emit_frame_size =
    679		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
    680		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
    681		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
    682		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
    683		8 + 16,
    684	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
    685	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
    686	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
    687	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
    688	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
    689	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
    690	.insert_nop = jpeg_v2_0_dec_ring_nop,
    691	.insert_start = jpeg_v2_6_dec_ring_insert_start,
    692	.insert_end = jpeg_v2_6_dec_ring_insert_end,
    693	.pad_ib = amdgpu_ring_generic_pad_ib,
    694	.begin_use = amdgpu_jpeg_ring_begin_use,
    695	.end_use = amdgpu_jpeg_ring_end_use,
    696	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
    697	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
    698	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
    699};
    700
    701static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
    702{
    703	int i;
    704
    705	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    706		if (adev->jpeg.harvest_config & (1 << i))
    707			continue;
    708		if (adev->asic_type == CHIP_ARCTURUS)
    709			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
    710		else  /* CHIP_ALDEBARAN */
    711			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
    712		adev->jpeg.inst[i].ring_dec.me = i;
    713		DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
    714	}
    715}
    716
    717static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
    718	.set = jpeg_v2_5_set_interrupt_state,
    719	.process = jpeg_v2_5_process_interrupt,
    720};
    721
    722static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
    723{
    724	int i;
    725
    726	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
    727		if (adev->jpeg.harvest_config & (1 << i))
    728			continue;
    729
    730		adev->jpeg.inst[i].irq.num_types = 1;
    731		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
    732	}
    733}
    734
    735const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
    736{
    737		.type = AMD_IP_BLOCK_TYPE_JPEG,
    738		.major = 2,
    739		.minor = 5,
    740		.rev = 0,
    741		.funcs = &jpeg_v2_5_ip_funcs,
    742};
    743
    744const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
    745{
    746		.type = AMD_IP_BLOCK_TYPE_JPEG,
    747		.major = 2,
    748		.minor = 6,
    749		.rev = 0,
    750		.funcs = &jpeg_v2_6_ip_funcs,
    751};
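
/*
 * Illustrative sketch (not part of this file): the two exported
 * amdgpu_ip_block_version structs above are what the rest of the driver
 * registers during device init, roughly along these lines, assuming the
 * usual amdgpu_device_ip_block_add() helper and the JPEG IP-version lookup
 * already used by jpeg_v2_5_set_ras_funcs() below:
 *
 *	if (adev->ip_versions[JPEG_HWIP][0] == IP_VERSION(2, 6, 0))
 *		amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
 *	else
 *		amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
 */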
    752
    753static uint32_t jpeg_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
    754		uint32_t instance, uint32_t sub_block)
    755{
    756	uint32_t poison_stat = 0, reg_value = 0;
    757
    758	switch (sub_block) {
    759	case AMDGPU_JPEG_V2_6_JPEG0:
    760		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG0_STATUS);
    761		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
    762		break;
    763	case AMDGPU_JPEG_V2_6_JPEG1:
    764		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG1_STATUS);
    765		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
    766		break;
    767	default:
    768		break;
    769	}
    770
    771	if (poison_stat)
    772		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
    773			instance, sub_block);
    774
    775	return poison_stat;
    776}
    777
    778static bool jpeg_v2_6_query_ras_poison_status(struct amdgpu_device *adev)
    779{
    780	uint32_t inst = 0, sub = 0, poison_stat = 0;
    781
    782	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
    783		for (sub = 0; sub < AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK; sub++)
    784			poison_stat +=
    785			jpeg_v2_6_query_poison_by_instance(adev, inst, sub);
    786
    787	return !!poison_stat;
    788}
    789
    790const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
    791	.query_poison_status = jpeg_v2_6_query_ras_poison_status,
    792};
    793
    794static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
    795	.ras_block = {
    796		.hw_ops = &jpeg_v2_6_ras_hw_ops,
    797	},
    798};
    799
    800static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
    801{
    802	switch (adev->ip_versions[JPEG_HWIP][0]) {
    803	case IP_VERSION(2, 6, 0):
    804		adev->jpeg.ras = &jpeg_v2_6_ras;
    805		break;
    806	default:
    807		break;
    808	}
    809
    810	if (adev->jpeg.ras) {
    811		amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
    812
    813		strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
    814		adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
    815		adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
    816		adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
    817
     818		/* If no special ras_late_init function is defined, use the default ras_late_init */
    819		if (!adev->jpeg.ras->ras_block.ras_late_init)
    820			adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
    821	}
    822}