cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_virt.c (28599B)


      1/*
      2 * Copyright 2016 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include <linux/module.h>
     25
     26#ifdef CONFIG_X86
     27#include <asm/hypervisor.h>
     28#endif
     29
     30#include <drm/drm_drv.h>
     31#include <xen/xen.h>
     32
     33#include "amdgpu.h"
     34#include "amdgpu_ras.h"
     35#include "vi.h"
     36#include "soc15.h"
     37#include "nv.h"
     38
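        /*
         * Record one firmware id/version pair in the vf2pf ucode_info table
         * so the host (PF) can see which microcode the guest driver loaded.
         */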
     39#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
     40	do { \
     41		vf2pf_info->ucode_info[ucode].id = ucode; \
     42		vf2pf_info->ucode_info[ucode].version = ver; \
     43	} while (0)
     44
     45bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
     46{
     47	/* By now all MMIO pages except mailbox are blocked */
     48	/* if blocking is enabled in hypervisor. Choose the */
     49	/* SCRATCH_REG0 to test. */
     50	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
     51}
     52
     53void amdgpu_virt_init_setting(struct amdgpu_device *adev)
     54{
     55	struct drm_device *ddev = adev_to_drm(adev);
     56
     57	/* enable virtual display */
     58	if (adev->asic_type != CHIP_ALDEBARAN &&
     59	    adev->asic_type != CHIP_ARCTURUS) {
     60		if (adev->mode_info.num_crtc == 0)
     61			adev->mode_info.num_crtc = 1;
     62		adev->enable_virtual_display = true;
     63	}
     64	ddev->driver_features &= ~DRIVER_ATOMIC;
     65	adev->cg_flags = 0;
     66	adev->pg_flags = 0;
     67}
     68
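        /*
         * Emit a KIQ packet that writes @reg0 and then polls @reg1 until
         * (@reg1 & @mask) == @ref, retrying with sleeps when not in IRQ context.
         */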
     69void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
     70					uint32_t reg0, uint32_t reg1,
     71					uint32_t ref, uint32_t mask)
     72{
     73	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
     74	struct amdgpu_ring *ring = &kiq->ring;
     75	signed long r, cnt = 0;
     76	unsigned long flags;
     77	uint32_t seq;
     78
     79	spin_lock_irqsave(&kiq->ring_lock, flags);
     80	amdgpu_ring_alloc(ring, 32);
     81	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
     82					    ref, mask);
     83	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
     84	if (r)
     85		goto failed_undo;
     86
     87	amdgpu_ring_commit(ring);
     88	spin_unlock_irqrestore(&kiq->ring_lock, flags);
     89
     90	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
     91
     92	/* don't wait anymore for IRQ context */
     93	if (r < 1 && in_interrupt())
     94		goto failed_kiq;
     95
     96	might_sleep();
     97	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
     98
     99		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
    100		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
    101	}
    102
    103	if (cnt > MAX_KIQ_REG_TRY)
    104		goto failed_kiq;
    105
    106	return;
    107
    108failed_undo:
    109	amdgpu_ring_undo(ring);
    110	spin_unlock_irqrestore(&kiq->ring_lock, flags);
    111failed_kiq:
    112	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
    113}
    114
    115/**
    116 * amdgpu_virt_request_full_gpu() - request full gpu access
    117 * @adev:	amdgpu device.
     118 * @init:	whether this is driver init time.
     119 * When starting driver init/fini, full GPU access must be requested first.
     120 * Return: Zero on success, otherwise an error code.
    121 */
    122int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
    123{
    124	struct amdgpu_virt *virt = &adev->virt;
    125	int r;
    126
    127	if (virt->ops && virt->ops->req_full_gpu) {
    128		r = virt->ops->req_full_gpu(adev, init);
    129		if (r)
    130			return r;
    131
    132		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
    133	}
    134
    135	return 0;
    136}
    137
    138/**
    139 * amdgpu_virt_release_full_gpu() - release full gpu access
    140 * @adev:	amdgpu device.
     141 * @init:	whether this is driver init time.
     142 * When driver init/fini finishes, full GPU access must be released.
     143 * Return: Zero on success, otherwise an error code.
    144 */
    145int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
    146{
    147	struct amdgpu_virt *virt = &adev->virt;
    148	int r;
    149
    150	if (virt->ops && virt->ops->rel_full_gpu) {
    151		r = virt->ops->rel_full_gpu(adev, init);
    152		if (r)
    153			return r;
    154
    155		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
    156	}
    157	return 0;
    158}
    159
    160/**
    161 * amdgpu_virt_reset_gpu() - reset gpu
    162 * @adev:	amdgpu device.
     163 * Send a reset command to the GPU hypervisor to reset the GPU the VM is using.
     164 * Return: Zero on success, otherwise an error code.
    165 */
    166int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
    167{
    168	struct amdgpu_virt *virt = &adev->virt;
    169	int r;
    170
    171	if (virt->ops && virt->ops->reset_gpu) {
    172		r = virt->ops->reset_gpu(adev);
    173		if (r)
    174			return r;
    175
    176		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
    177	}
    178
    179	return 0;
    180}
    181
    182void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
    183{
    184	struct amdgpu_virt *virt = &adev->virt;
    185
    186	if (virt->ops && virt->ops->req_init_data)
    187		virt->ops->req_init_data(adev);
    188
    189	if (adev->virt.req_init_data_ver > 0)
    190		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
    191	else
    192		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
    193}
    194
    195/**
    196 * amdgpu_virt_wait_reset() - wait for reset gpu completed
    197 * @adev:	amdgpu device.
     198 * Wait for the GPU reset to complete.
     199 * Return: Zero on success, otherwise an error code.
    200 */
    201int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
    202{
    203	struct amdgpu_virt *virt = &adev->virt;
    204
    205	if (!virt->ops || !virt->ops->wait_reset)
    206		return -EINVAL;
    207
    208	return virt->ops->wait_reset(adev);
    209}
    210
    211/**
    212 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
    213 * @adev:	amdgpu device.
     214 * The MM table is used by UVD and VCE for their initialization.
     215 * Return: Zero on successful allocation, otherwise an error code.
    216 */
    217int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
    218{
    219	int r;
    220
    221	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
    222		return 0;
    223
    224	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
    225				    AMDGPU_GEM_DOMAIN_VRAM,
    226				    &adev->virt.mm_table.bo,
    227				    &adev->virt.mm_table.gpu_addr,
    228				    (void *)&adev->virt.mm_table.cpu_addr);
    229	if (r) {
    230		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
    231		return r;
    232	}
    233
    234	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
    235	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
    236		 adev->virt.mm_table.gpu_addr,
    237		 adev->virt.mm_table.cpu_addr);
    238	return 0;
    239}
    240
    241/**
    242 * amdgpu_virt_free_mm_table() - free mm table memory
    243 * @adev:	amdgpu device.
    244 * Free MM table memory
    245 */
    246void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
    247{
    248	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
    249		return;
    250
    251	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
    252			      &adev->virt.mm_table.gpu_addr,
    253			      (void *)&adev->virt.mm_table.cpu_addr);
    254	adev->virt.mm_table.gpu_addr = 0;
    255}
    256
    257
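        /*
         * Byte-wise sum of @obj seeded with @key; the bytes of the @checksum
         * field itself are subtracted so the result can be compared in place.
         */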
    258unsigned int amd_sriov_msg_checksum(void *obj,
    259				unsigned long obj_size,
    260				unsigned int key,
    261				unsigned int checksum)
    262{
    263	unsigned int ret = key;
    264	unsigned long i = 0;
    265	unsigned char *pos;
    266
     267	pos = (unsigned char *)obj;
    268	/* calculate checksum */
    269	for (i = 0; i < obj_size; ++i)
    270		ret += *(pos + i);
    271	/* minus the checksum itself */
     272	pos = (unsigned char *)&checksum;
    273	for (i = 0; i < sizeof(checksum); ++i)
    274		ret -= *(pos + i);
    275	return ret;
    276}
    277
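        /* Allocate the bookkeeping arrays used to track and reserve RAS bad pages. */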
    278static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
    279{
    280	struct amdgpu_virt *virt = &adev->virt;
    281	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
     282	/* GPU will be marked bad on host if the bp count is more than 10,
     283	 * so allocating 512 entries is enough.
    284	 */
    285	unsigned int align_space = 512;
    286	void *bps = NULL;
    287	struct amdgpu_bo **bps_bo = NULL;
    288
    289	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
    290	if (!*data)
    291		goto data_failure;
    292
    293	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
    294	if (!bps)
    295		goto bps_failure;
    296
    297	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
    298	if (!bps_bo)
    299		goto bps_bo_failure;
    300
    301	(*data)->bps = bps;
    302	(*data)->bps_bo = bps_bo;
    303	(*data)->count = 0;
    304	(*data)->last_reserved = 0;
    305
    306	virt->ras_init_done = true;
    307
    308	return 0;
    309
    310bps_bo_failure:
    311	kfree(bps);
    312bps_failure:
    313	kfree(*data);
    314data_failure:
    315	return -ENOMEM;
    316}
    317
    318static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
    319{
    320	struct amdgpu_virt *virt = &adev->virt;
    321	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
    322	struct amdgpu_bo *bo;
    323	int i;
    324
    325	if (!data)
    326		return;
    327
    328	for (i = data->last_reserved - 1; i >= 0; i--) {
    329		bo = data->bps_bo[i];
    330		amdgpu_bo_free_kernel(&bo, NULL, NULL);
    331		data->bps_bo[i] = bo;
    332		data->last_reserved = i;
    333	}
    334}
    335
    336void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
    337{
    338	struct amdgpu_virt *virt = &adev->virt;
    339	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
    340
    341	virt->ras_init_done = false;
    342
    343	if (!data)
    344		return;
    345
    346	amdgpu_virt_ras_release_bp(adev);
    347
    348	kfree(data->bps);
    349	kfree(data->bps_bo);
    350	kfree(data);
    351	virt->virt_eh_data = NULL;
    352}
    353
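        /* Append @pages host-reported bad-page records to the local list. */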
    354static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
    355		struct eeprom_table_record *bps, int pages)
    356{
    357	struct amdgpu_virt *virt = &adev->virt;
    358	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
    359
    360	if (!data)
    361		return;
    362
    363	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
    364	data->count += pages;
    365}
    366
    367static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
    368{
    369	struct amdgpu_virt *virt = &adev->virt;
    370	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
    371	struct amdgpu_bo *bo = NULL;
    372	uint64_t bp;
    373	int i;
    374
    375	if (!data)
    376		return;
    377
    378	for (i = data->last_reserved; i < data->count; i++) {
    379		bp = data->bps[i].retired_page;
    380
     381		/* There are two reservation-error cases that should be ignored:
    382		 * 1) a ras bad page has been allocated (used by someone);
    383		 * 2) a ras bad page has been reserved (duplicate error injection
    384		 *    for one page);
    385		 */
    386		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
    387					       AMDGPU_GPU_PAGE_SIZE,
    388					       AMDGPU_GEM_DOMAIN_VRAM,
    389					       &bo, NULL))
     390			DRM_DEBUG("RAS WARN: failed to reserve vram for retired page %llx\n", bp);
    391
    392		data->bps_bo[i] = bo;
    393		data->last_reserved = i + 1;
    394		bo = NULL;
    395	}
    396}
    397
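        /*
         * Return true if @retired_page is already tracked (or if bad-page
         * tracking has not been initialized), so the caller can skip it.
         */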
    398static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
    399		uint64_t retired_page)
    400{
    401	struct amdgpu_virt *virt = &adev->virt;
    402	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
    403	int i;
    404
    405	if (!data)
    406		return true;
    407
    408	for (i = 0; i < data->count; i++)
    409		if (retired_page == data->bps[i].retired_page)
    410			return true;
    411
    412	return false;
    413}
    414
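        /*
         * Walk the bad-page block that the host placed in reserved FW VRAM
         * and record/reserve every page that is not tracked yet.
         */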
    415static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
    416		uint64_t bp_block_offset, uint32_t bp_block_size)
    417{
    418	struct eeprom_table_record bp;
    419	uint64_t retired_page;
    420	uint32_t bp_idx, bp_cnt;
    421
    422	if (bp_block_size) {
    423		bp_cnt = bp_block_size / sizeof(uint64_t);
    424		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
    425			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
    426					bp_block_offset + bp_idx * sizeof(uint64_t));
    427			bp.retired_page = retired_page;
    428
    429			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
    430				continue;
    431
    432			amdgpu_virt_ras_add_bps(adev, &bp, 1);
    433
    434			amdgpu_virt_ras_reserve_bps(adev);
    435		}
    436	}
    437}
    438
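        /* Validate and parse the pf2vf message written by the host into reserved FW VRAM. */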
    439static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
    440{
    441	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
    442	uint32_t checksum;
    443	uint32_t checkval;
    444
    445	uint32_t i;
    446	uint32_t tmp;
    447
    448	if (adev->virt.fw_reserve.p_pf2vf == NULL)
    449		return -EINVAL;
    450
    451	if (pf2vf_info->size > 1024) {
    452		DRM_ERROR("invalid pf2vf message size\n");
    453		return -EINVAL;
    454	}
    455
    456	switch (pf2vf_info->version) {
    457	case 1:
    458		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
    459		checkval = amd_sriov_msg_checksum(
    460			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
    461			adev->virt.fw_reserve.checksum_key, checksum);
    462		if (checksum != checkval) {
    463			DRM_ERROR("invalid pf2vf message\n");
    464			return -EINVAL;
    465		}
    466
    467		adev->virt.gim_feature =
    468			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
    469		break;
    470	case 2:
    471		/* TODO: missing key, need to add it later */
    472		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
    473		checkval = amd_sriov_msg_checksum(
    474			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
    475			0, checksum);
    476		if (checksum != checkval) {
    477			DRM_ERROR("invalid pf2vf message\n");
    478			return -EINVAL;
    479		}
    480
    481		adev->virt.vf2pf_update_interval_ms =
    482			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
    483		adev->virt.gim_feature =
    484			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
    485		adev->virt.reg_access =
    486			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
    487
    488		adev->virt.decode_max_dimension_pixels = 0;
    489		adev->virt.decode_max_frame_pixels = 0;
    490		adev->virt.encode_max_dimension_pixels = 0;
    491		adev->virt.encode_max_frame_pixels = 0;
    492		adev->virt.is_mm_bw_enabled = false;
    493		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
    494			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
    495			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
    496
    497			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
    498			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
    499
    500			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
    501			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
    502
    503			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
    504			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
    505		}
     506		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
    507			adev->virt.is_mm_bw_enabled = true;
    508
    509		adev->unique_id =
    510			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
    511		break;
    512	default:
    513		DRM_ERROR("invalid pf2vf version\n");
    514		return -EINVAL;
    515	}
    516
     517	/* clamp interval values that are too large or too small */
    518	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
    519		adev->virt.vf2pf_update_interval_ms = 2000;
    520
    521	return 0;
    522}
    523
    524static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
    525{
    526	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
    527	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
    528
    529	if (adev->virt.fw_reserve.p_vf2pf == NULL)
    530		return;
    531
    532	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
    533	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
    534	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
    535	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
    536	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
    537	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
    538	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
    539	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
    540	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
    541	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
    542	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
    543	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
    544	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
    545	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
    546			    adev->psp.asd_context.bin_desc.fw_version);
    547	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
    548			    adev->psp.ras_context.context.bin_desc.fw_version);
    549	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
    550			    adev->psp.xgmi_context.context.bin_desc.fw_version);
    551	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
    552	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
    553	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
    554	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
    555	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
    556}
    557
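        /* Fill in and checksum the vf2pf message for the host to read. */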
    558static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
    559{
    560	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
    561
    562	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
    563
    564	if (adev->virt.fw_reserve.p_vf2pf == NULL)
    565		return -EINVAL;
    566
    567	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
    568
    569	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
    570	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
    571
    572#ifdef MODULE
    573	if (THIS_MODULE->version != NULL)
    574		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
    575	else
    576#endif
    577		strcpy(vf2pf_info->driver_version, "N/A");
    578
    579	vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
    580	vf2pf_info->driver_cert = 0;
    581	vf2pf_info->os_info.all = 0;
    582
    583	vf2pf_info->fb_usage =
    584		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
    585	vf2pf_info->fb_vis_usage =
    586		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
    587	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
    588	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
    589
    590	amdgpu_virt_populate_vf2pf_ucode_info(adev);
    591
    592	/* TODO: read dynamic info */
    593	vf2pf_info->gfx_usage = 0;
    594	vf2pf_info->compute_usage = 0;
    595	vf2pf_info->encode_usage = 0;
    596	vf2pf_info->decode_usage = 0;
    597
    598	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
    599	vf2pf_info->checksum =
    600		amd_sriov_msg_checksum(
    601		vf2pf_info, vf2pf_info->header.size, 0, 0);
    602
    603	return 0;
    604}
    605
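        /* Periodic worker that refreshes the pf2vf/vf2pf exchange and re-arms itself. */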
    606static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
    607{
    608	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
    609	int ret;
    610
    611	ret = amdgpu_virt_read_pf2vf_data(adev);
    612	if (ret)
    613		goto out;
    614	amdgpu_virt_write_vf2pf_data(adev);
    615
    616out:
     617	schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
    618}
    619
    620void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
    621{
    622	if (adev->virt.vf2pf_update_interval_ms != 0) {
    623		DRM_INFO("clean up the vf2pf work item\n");
    624		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
    625		adev->virt.vf2pf_update_interval_ms = 0;
    626	}
    627}
    628
    629void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
    630{
    631	adev->virt.fw_reserve.p_pf2vf = NULL;
    632	adev->virt.fw_reserve.p_vf2pf = NULL;
    633	adev->virt.vf2pf_update_interval_ms = 0;
    634
    635	if (adev->mman.fw_vram_usage_va != NULL) {
     636		/* this path is taken during ip_init and reset to (re)initialize the workqueue */
    637		amdgpu_virt_exchange_data(adev);
    638
    639		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
    640		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
    641	} else if (adev->bios != NULL) {
     642		/* this path is taken in the early init stage to get necessary flags, e.g. rlcg_acc related */
    643		adev->virt.fw_reserve.p_pf2vf =
    644			(struct amd_sriov_msg_pf2vf_info_header *)
    645			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
    646
    647		amdgpu_virt_read_pf2vf_data(adev);
    648	}
    649}
    650
    651
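        /*
         * Map the pf2vf/vf2pf message areas in reserved FW VRAM, exchange data
         * once, and pick up the bad-page block advertised by a v2 host.
         */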
    652void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
    653{
    654	uint64_t bp_block_offset = 0;
    655	uint32_t bp_block_size = 0;
    656	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
    657
    658	if (adev->mman.fw_vram_usage_va != NULL) {
    659
    660		adev->virt.fw_reserve.p_pf2vf =
    661			(struct amd_sriov_msg_pf2vf_info_header *)
    662			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
    663		adev->virt.fw_reserve.p_vf2pf =
    664			(struct amd_sriov_msg_vf2pf_info_header *)
    665			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
    666
    667		amdgpu_virt_read_pf2vf_data(adev);
    668		amdgpu_virt_write_vf2pf_data(adev);
    669
    670		/* bad page handling for version 2 */
     671		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
     672			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
     673
     674			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
     675					  ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
     676			bp_block_size = pf2vf_v2->bp_block_size;
     677
     678			if (bp_block_size && !adev->virt.ras_init_done)
     679				amdgpu_virt_init_ras_err_handler_data(adev);
     680
     681			if (adev->virt.ras_init_done)
     682				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
     683		}
    684	}
    685}
    686
    687
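        /*
         * Detect SR-IOV VF or passthrough operation from the IOV identifier
         * register and install the matching per-ASIC virtualization ops.
         */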
    688void amdgpu_detect_virtualization(struct amdgpu_device *adev)
    689{
    690	uint32_t reg;
    691
    692	switch (adev->asic_type) {
    693	case CHIP_TONGA:
    694	case CHIP_FIJI:
    695		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
    696		break;
    697	case CHIP_VEGA10:
    698	case CHIP_VEGA20:
    699	case CHIP_NAVI10:
    700	case CHIP_NAVI12:
    701	case CHIP_SIENNA_CICHLID:
    702	case CHIP_ARCTURUS:
    703	case CHIP_ALDEBARAN:
    704		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
    705		break;
     706	default: /* other chips don't support SRIOV */
    707		reg = 0;
    708		break;
    709	}
    710
    711	if (reg & 1)
    712		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
    713
    714	if (reg & 0x80000000)
    715		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
    716
    717	if (!reg) {
     718		/* passthrough mode excludes sriov mode */
    719		if (is_virtual_machine() && !xen_initial_domain())
    720			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
    721	}
    722
    723	/* we have the ability to check now */
    724	if (amdgpu_sriov_vf(adev)) {
    725		switch (adev->asic_type) {
    726		case CHIP_TONGA:
    727		case CHIP_FIJI:
    728			vi_set_virt_ops(adev);
    729			break;
    730		case CHIP_VEGA10:
    731			soc15_set_virt_ops(adev);
    732#ifdef CONFIG_X86
     733			/* do not send GPU_INIT_DATA with MS_HYPERV */
    734			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
    735#endif
    736				/* send a dummy GPU_INIT_DATA request to host on vega10 */
    737				amdgpu_virt_request_init_data(adev);
    738			break;
    739		case CHIP_VEGA20:
    740		case CHIP_ARCTURUS:
    741		case CHIP_ALDEBARAN:
    742			soc15_set_virt_ops(adev);
    743			break;
    744		case CHIP_NAVI10:
    745		case CHIP_NAVI12:
    746		case CHIP_SIENNA_CICHLID:
    747			nv_set_virt_ops(adev);
    748			/* try send GPU_INIT_DATA request to host */
    749			amdgpu_virt_request_init_data(adev);
    750			break;
     751		default: /* other chips don't support SRIOV */
    752			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
    753			break;
    754		}
    755	}
    756}
    757
    758static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
    759{
     760	return amdgpu_sriov_is_debug(adev);
    761}
    762
    763static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
    764{
     765	return amdgpu_sriov_is_normal(adev);
    766}
    767
    768int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
    769{
    770	if (!amdgpu_sriov_vf(adev) ||
    771	    amdgpu_virt_access_debugfs_is_kiq(adev))
    772		return 0;
    773
    774	if (amdgpu_virt_access_debugfs_is_mmio(adev))
    775		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
    776	else
    777		return -EPERM;
    778
    779	return 0;
    780}
    781
    782void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
    783{
    784	if (amdgpu_sriov_vf(adev))
    785		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
    786}
    787
    788enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
    789{
    790	enum amdgpu_sriov_vf_mode mode;
    791
    792	if (amdgpu_sriov_vf(adev)) {
    793		if (amdgpu_sriov_is_pp_one_vf(adev))
    794			mode = SRIOV_VF_MODE_ONE_VF;
    795		else
    796			mode = SRIOV_VF_MODE_MULTI_VF;
    797	} else {
    798		mode = SRIOV_VF_MODE_BARE_METAL;
    799	}
    800
    801	return mode;
    802}
    803
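        /* Clamp the advertised codec capabilities to the limits reported by the host. */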
    804void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
    805			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
    806			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
    807{
    808	uint32_t i;
    809
    810	if (!adev->virt.is_mm_bw_enabled)
    811		return;
    812
    813	if (encode) {
    814		for (i = 0; i < encode_array_size; i++) {
    815			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
    816			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
    817			if (encode[i].max_width > 0)
    818				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
    819			else
    820				encode[i].max_height = 0;
    821		}
    822	}
    823
    824	if (decode) {
    825		for (i = 0; i < decode_array_size; i++) {
    826			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
    827			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
    828			if (decode[i].max_width > 0)
    829				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
    830			else
    831				decode[i].max_height = 0;
    832		}
    833	}
    834}
    835
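        /*
         * Decide whether an access to @hwip must go through the RLCG indirect
         * path and, if so, return the command flag to use in @rlcg_flag.
         */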
    836static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
    837						 u32 acc_flags, u32 hwip,
    838						 bool write, u32 *rlcg_flag)
    839{
    840	bool ret = false;
    841
    842	switch (hwip) {
    843	case GC_HWIP:
    844		if (amdgpu_sriov_reg_indirect_gc(adev)) {
    845			*rlcg_flag =
    846				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
    847			ret = true;
     848		/* only in the new version are AMDGPU_REGS_NO_KIQ and
     849		 * AMDGPU_REGS_RLC enabled simultaneously */
    850		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
    851				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
    852			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
    853			ret = true;
    854		}
    855		break;
    856	case MMHUB_HWIP:
    857		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
    858		    (acc_flags & AMDGPU_REGS_RLC) && write) {
    859			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
    860			ret = true;
    861		}
    862		break;
    863	default:
    864		break;
    865	}
    866	return ret;
    867}
    868
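        /*
         * Perform one indirect register access through the RLCG scratch-register
         * interface and return the value read back from SCRATCH_REG0.
         */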
    869static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
    870{
    871	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
    872	uint32_t timeout = 50000;
    873	uint32_t i, tmp;
    874	uint32_t ret = 0;
    875	void *scratch_reg0;
    876	void *scratch_reg1;
    877	void *scratch_reg2;
    878	void *scratch_reg3;
    879	void *spare_int;
    880
    881	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
    882		dev_err(adev->dev,
    883			"indirect registers access through rlcg is not available\n");
    884		return 0;
    885	}
    886
    887	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
    888	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
    889	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
    890	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
    891	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
    892	if (reg_access_ctrl->spare_int)
    893		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
    894
    895	if (offset == reg_access_ctrl->grbm_cntl) {
    896		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
    897		writel(v, scratch_reg2);
    898		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
    899	} else if (offset == reg_access_ctrl->grbm_idx) {
    900		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
    901		writel(v, scratch_reg3);
    902		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
    903	} else {
    904		/*
    905		 * SCRATCH_REG0 	= read/write value
    906		 * SCRATCH_REG1[30:28]	= command
    907		 * SCRATCH_REG1[19:0]	= address in dword
    908		 * SCRATCH_REG1[26:24]	= Error reporting
    909		 */
    910		writel(v, scratch_reg0);
    911		writel((offset | flag), scratch_reg1);
    912		if (reg_access_ctrl->spare_int)
    913			writel(1, spare_int);
    914
    915		for (i = 0; i < timeout; i++) {
    916			tmp = readl(scratch_reg1);
    917			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
    918				break;
    919			udelay(10);
    920		}
    921
    922		if (i >= timeout) {
    923			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
    924				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
    925					dev_err(adev->dev,
    926						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
    927				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
    928					dev_err(adev->dev,
    929						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
    930				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
    931					dev_err(adev->dev,
    932						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
    933				} else {
    934					dev_err(adev->dev,
    935						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
    936				}
    937			} else {
    938				dev_err(adev->dev,
     939					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
    940			}
    941		}
    942	}
    943
    944	ret = readl(scratch_reg0);
    945	return ret;
    946}
    947
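        /*
         * SR-IOV register write wrapper: route through RLCG when required,
         * otherwise fall back to a direct (optionally NO_KIQ) MMIO write.
         */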
    948void amdgpu_sriov_wreg(struct amdgpu_device *adev,
    949		       u32 offset, u32 value,
    950		       u32 acc_flags, u32 hwip)
    951{
    952	u32 rlcg_flag;
    953
    954	if (!amdgpu_sriov_runtime(adev) &&
    955		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
    956		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
    957		return;
    958	}
    959
    960	if (acc_flags & AMDGPU_REGS_NO_KIQ)
    961		WREG32_NO_KIQ(offset, value);
    962	else
    963		WREG32(offset, value);
    964}
    965
    966u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
    967		      u32 offset, u32 acc_flags, u32 hwip)
    968{
    969	u32 rlcg_flag;
    970
    971	if (!amdgpu_sriov_runtime(adev) &&
    972		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
    973		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
    974
    975	if (acc_flags & AMDGPU_REGS_NO_KIQ)
    976		return RREG32_NO_KIQ(offset);
    977	else
    978		return RREG32(offset);
    979}