cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_kms.c (47113B)


/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

static void amdgpu_runtime_pm_quirk(struct amdgpu_device *adev)
{
	/*
	 * Add below quirk on several sienna_cichlid cards to disable
	 * runtime pm to fix EMI failures.
	 */
	if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) ||
	    ((adev->pdev->device == 0x73BF) && (adev->pdev->revision == 0xCF)))
		adev->runpm = false;
}
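
/*
 * A hypothetical sketch (not part of the original file): further quirk
 * entries would follow the same device-id/revision pattern, e.g.
 *
 *	if ((adev->pdev->device == 0x73AB) && (adev->pdev->revision == 0xC3))
 *		adev->runpm = false;
 *
 * where 0x73AB/0xC3 are invented values used purely for illustration.
 */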

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
		DRM_WARN("smart shift update failed\n");

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_px(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
		adev->runpm = true;
		dev_info(adev->dev, "Using ATPX for runtime pm\n");
	} else if (amdgpu_device_supports_boco(dev) &&
		   (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
		dev_info(adev->dev, "Using BOCO for runtime pm\n");
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on CI+ */
			adev->runpm = true;
			break;
		}

		amdgpu_runtime_pm_quirk(adev);

		if (adev->runpm)
			dev_info(adev->dev, "Using BACO for runtime pm\n");
	}
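
	/*
	 * A note on the checks above (a sketch of the parameter semantics,
	 * not from the original source): the amdgpu_runtime_pm module
	 * parameter conventionally means -1 = auto (default), 0 = disabled
	 * and 1 = forced on, hence the "!= 0" tests (auto or forced on) and
	 * the "> 0" test (forced on only) in the BACO switch above.
	 */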

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
		DRM_WARN("smart shift update failed\n");

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case TA_FW_TYPE_PSP_XGMI:
			fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.xgmi_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAS:
			fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.ras_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_HDCP:
			fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.hdcp_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_DTM:
			fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.dtm_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAP:
			fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.rap_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_SECUREDISPLAY:
			fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
			fw_info->feature =
				adev->psp.securedisplay_context.context.bin_desc
					.feature_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos.fw_version;
		fw_info->feature = adev->psp.sos.feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
		fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TOC:
		fw_info->ver = adev->psp.toc.fw_version;
		fw_info->feature = adev->psp.toc.feature_version;
		break;
	case AMDGPU_INFO_FW_CAP:
		fw_info->ver = adev->psp.cap_fw_version;
		fw_info->feature = adev->psp.cap_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
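
/*
 * A minimal sketch of how a new firmware type would slot into the query
 * above, assuming a hypothetical AMDGPU_INFO_FW_FOO uapi constant and a
 * matching adev->foo block (both invented for illustration):
 *
 *	case AMDGPU_INFO_FW_FOO:
 *		fw_info->ver = adev->foo.fw_version;
 *		fw_info->feature = adev->foo.feature_version;
 *		break;
 *
 * Each case only copies out a version/feature pair; index validation (as in
 * the MEC and SDMA cases) is needed whenever multiple instances exist.
 */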

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
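
/*
 * Worked example (illustrative, not from the original source): with three
 * ready compute rings, and amdgpu_ctx_num_entities not clamping further,
 * num_rings = 3 and result->available_rings = (1 << 3) - 1 = 0x7, i.e. one
 * bit per usable ring. Userspace (e.g. Mesa) typically tests these bits
 * before submitting work to a given ring.
 */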

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *gtt_man =
			&adev->mman.gtt_mgr.manager;
		struct ttm_resource_manager *vram_man =
			&adev->mman.vram_mgr.manager;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			ttm_resource_manager_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device *dev_info;
		uint64_t vm_size;
		int ret;

		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
		if (!dev_info)
			return -ENOMEM;

		dev_info->device_id = adev->pdev->device;
		dev_info->chip_rev = adev->rev_id;
		dev_info->external_rev = adev->external_rev_id;
		dev_info->pci_rev = adev->pdev->revision;
		dev_info->family = adev->family;
		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info->max_engine_clock = adev->clock.default_sclk * 10;
			dev_info->max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info->_pad = 0;
		dev_info->ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info->virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->cu_active_number = adev->gfx.cu_info.number;
		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info->vram_type = adev->gmc.vram_type;
		dev_info->vram_bit_width = adev->gmc.vram_width;
		dev_info->vce_harvest_config = adev->vce.harvest_config;
		dev_info->gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info->pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		ret = copy_to_user(out, dev_info,
				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
		kfree(dev_info);
		return ret;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		case AMDGPU_INFO_VBIOS_INFO: {
			struct drm_amdgpu_info_vbios vbios_info = {};
			struct atom_context *atom_context;

			atom_context = adev->mode_info.atom_context;
			memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
			vbios_info.version = atom_context->version;
			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
						sizeof(atom_context->vbios_ver_str));
			memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));

			return copy_to_user(out, &vbios_info,
						min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				min_t(u64, size, sizeof(ras_mask))) ?
			-EFAULT : 0;
	}
	case AMDGPU_INFO_VIDEO_CAPS: {
		const struct amdgpu_video_codecs *codecs;
		struct drm_amdgpu_info_video_caps *caps;
		int r;

		switch (info->video_cap.type) {
		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
			if (r)
				return -EINVAL;
			break;
		case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
			r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
			if (r)
				return -EINVAL;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->video_cap.type);
			return -EINVAL;
		}

		caps = kzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return -ENOMEM;

		for (i = 0; i < codecs->codec_count; i++) {
			int idx = codecs->codec_array[i].codec_type;

			switch (idx) {
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
				caps->codec_info[idx].valid = 1;
				caps->codec_info[idx].max_width =
					codecs->codec_array[i].max_width;
				caps->codec_info[idx].max_height =
					codecs->codec_array[i].max_height;
				caps->codec_info[idx].max_pixels_per_frame =
					codecs->codec_array[i].max_pixels_per_frame;
				caps->codec_info[idx].max_level =
					codecs->codec_array[i].max_level;
				break;
			default:
				break;
			}
		}
		r = copy_to_user(out, caps,
				 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
		kfree(caps);
		return r;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
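
/*
 * A minimal userspace sketch of driving this ioctl (illustrative, not part
 * of the kernel source; error handling trimmed). It uses the uapi
 * structures from <drm/amdgpu_drm.h> and libdrm's drmCommandWrite() to
 * fetch the GFX ME firmware version, mirroring the AMDGPU_INFO_FW_VERSION
 * path handled above:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <xf86drm.h>
 *	#include <drm/amdgpu_drm.h>
 *
 *	int main(void)
 *	{
 *		struct drm_amdgpu_info_firmware fw = {0};
 *		struct drm_amdgpu_info req = {0};
 *		int fd = open("/dev/dri/renderD128", O_RDWR); // adjust node
 *
 *		req.return_pointer = (uintptr_t)&fw;
 *		req.return_size = sizeof(fw);
 *		req.query = AMDGPU_INFO_FW_VERSION;
 *		req.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *		if (fd >= 0 &&
 *		    drmCommandWrite(fd, DRM_AMDGPU_INFO, &req, sizeof(req)) == 0)
 *			printf("ME fw 0x%08x feature %u\n", fw.ver, fw.feature);
 *		return 0;
 *	}
 */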


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);


	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
	if (r)
		goto error_vm;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid) {
		amdgpu_pasid_free(pasid);
		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
	}

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_del(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
		amdgpu_vm_bo_del(adev, fpriv->prt_va);
		amdgpu_bo_unreserve(pd);
	}

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}


void amdgpu_driver_release_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_device_fini_sw(adev);
	pci_set_drvdata(adev->pdev, NULL);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
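	/* Worked example (illustrative, not from the original source): if the
	 * hw counter reads 100 while the scanout position is between start of
	 * vblank and start of vsync (vpos >= 0), 101 is returned, which is
	 * what DRM core expects; once vsync starts, the hw counter itself has
	 * incremented, so no bump is applied.
	 */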
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret, i;

	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
		TA_FW_NAME(XGMI),
		TA_FW_NAME(RAS),
		TA_FW_NAME(HDCP),
		TA_FW_NAME(DTM),
		TA_FW_NAME(RAP),
		TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
	};

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);


	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;

		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	smu_program = (fw_info.ver >> 24) & 0xff;
	smu_major = (fw_info.ver >> 16) & 0xff;
	smu_minor = (fw_info.ver >> 8) & 0xff;
	smu_debug = (fw_info.ver >> 0) & 0xff;
	seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
		   fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* TOC */
	query_fw.fw_type = AMDGPU_INFO_FW_TOC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CAP */
	if (adev->psp.cap_fw) {
		query_fw.fw_type = AMDGPU_INFO_FW_CAP;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
				fw_info.feature, fw_info.ver);
	}

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);

#endif

void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_firmware_info", 0444, root,
			    adev, &amdgpu_debugfs_firmware_info_fops);

#endif
}
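
/*
 * Usage note (illustrative, not part of the original file): with debugfs
 * mounted, the file registered above can be read directly, e.g.
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 *
 * which prints one "feature version / firmware version" line per block
 * queried by amdgpu_debugfs_firmware_info_show().
 */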