cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_vcn.c (31842B)


/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

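/*
 * One-time software init for the VCN block: pick the firmware image for
 * the detected IP version, validate it, and allocate the per-instance
 * VCPU buffer objects (firmware, stack/context, shared memory, and the
 * optional firmware log).
 */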
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case IP_VERSION(2, 5, 0):
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 2, 0):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 6, 0):
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 0):
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 2):
		if (adev->asic_type == CHIP_NAVI12)
			fw_name = FIRMWARE_NAVI12;
		else
			fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 64):
	case IP_VERSION(3, 0, 192):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name = FIRMWARE_SIENNA_CICHLID;
		else
			fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name = FIRMWARE_VANGOGH;
		break;
	case IP_VERSION(3, 0, 16):
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 33):
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 1):
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 2):
		fw_name = FIRMWARE_VCN_3_1_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 0):
		fw_name = FIRMWARE_VCN4_0_0;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 4):
		fw_name = FIRMWARE_VCN4_0_4;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

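	/* Fetch the chosen microcode image and sanity-check its header. */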
	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

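	/*
	 * VCPU BO layout: stack + context, preceded by the raw microcode
	 * when it is not loaded through PSP, and followed by the fw_shared
	 * region and (optionally) the firmware log buffer.
	 */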
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - fw_shared_size;
		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - fw_shared_size;

		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

		if (amdgpu_vcnfw_log) {
			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

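/*
 * Tear down everything amdgpu_vcn_sw_init() set up: per-instance DPG SRAM
 * and VCPU BOs, any saved suspend image, the rings, and the firmware
 * reference.
 */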
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.vcn_config[vcn_instance];

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

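/*
 * On suspend, stop the idle worker and snapshot each instance's VCPU BO
 * into system memory so it can be restored after power-down.
 */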
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

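/*
 * On resume, either restore the image saved by amdgpu_vcn_suspend() or,
 * if none exists, re-upload the raw microcode (non-PSP loads) and zero
 * the rest of the VCPU BO.
 */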
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

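/*
 * Deferred idle handler: count the fences still outstanding on every
 * ring; once nothing is pending, gate VCN power and drop the video
 * power profile, otherwise re-arm the timer.
 */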
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

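/*
 * Called before work is submitted to a VCN ring: bump the submission
 * count, make sure the block is ungated and on the video power profile,
 * and update the DPG pause state for the target instance.
 */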
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

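/*
 * Basic decode-ring liveness test: seed a scratch register with
 * 0xCAFEDEAD, ask the ring to write 0xDEADBEEF to it, and poll until
 * the new value shows up or the timeout expires.
 */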
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

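/*
 * Submit a decoder message buffer via a direct IB: data0/data1 carry
 * the message address, cmd triggers processing, and the remaining
 * slots are padded with NOPs. The message IB is freed once the job's
 * fence is in place.
 */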
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
		struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

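/*
 * End-to-end decode test: send a create-session message followed by a
 * destroy-session message and wait on the final fence.
 */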
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	const unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
				AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

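/*
 * Build and submit a minimal encoder session-create message (session
 * info, task info, op initialize) directly on the ring.
 */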
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			AMDGPU_IB_POOL_DIRECT,
			&ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(adev, &ib, fence);
	dma_fence_put(fence);

	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

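/*
 * When firmware is loaded through PSP, register the VCN image(s) in the
 * global ucode table so PSP can DMA them to the engine.
 */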
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
	int i;
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			/* currently only support 2 FW instances */
			if (i >= 2) {
				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
				break;
			}
			idx = AMDGPU_UCODE_ID_VCN + i;
			adev->firmware.ucode[idx].ucode_id = idx;
			adev->firmware.ucode[idx].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
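/*
 * Copy out whatever the firmware has logged since the last read. The
 * log is a ring buffer with a header; a read that crosses the end of
 * the buffer is split into two copy_to_user() chunks (read_num[0] and
 * read_num[1]) before the read pointer is advanced.
 */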
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min(size, (size_t)available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

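/*
 * Advertise the log buffer to the firmware: set the logging flag in the
 * shared memory area, point fw_log at the buffer's GPU address, and
 * initialize the ring-buffer header the firmware will write through.
 */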
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							      + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

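/*
 * RAS poison interrupt handler: forward the IV entry to the RAS
 * framework if a VCN RAS context has been registered.
 */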
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);

	return 0;
}