cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gmc_v6_0.c (31794B)


      1/*
      2 * Copyright 2014 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include <linux/firmware.h>
     25#include <linux/module.h>
     26#include <linux/pci.h>
     27
     28#include <drm/drm_cache.h>
     29#include "amdgpu.h"
     30#include "gmc_v6_0.h"
     31#include "amdgpu_ucode.h"
     32#include "amdgpu_gem.h"
     33
     34#include "bif/bif_3_0_d.h"
     35#include "bif/bif_3_0_sh_mask.h"
     36#include "oss/oss_1_0_d.h"
     37#include "oss/oss_1_0_sh_mask.h"
     38#include "gmc/gmc_6_0_d.h"
     39#include "gmc/gmc_6_0_sh_mask.h"
     40#include "dce/dce_6_0_d.h"
     41#include "dce/dce_6_0_sh_mask.h"
     42#include "si_enums.h"
     43
     44static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
     45static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
     46static int gmc_v6_0_wait_for_idle(void *handle);
     47
     48MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
     49MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
     50MODULE_FIRMWARE("amdgpu/verde_mc.bin");
     51MODULE_FIRMWARE("amdgpu/oland_mc.bin");
     52MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
     53MODULE_FIRMWARE("amdgpu/si58_mc.bin");
     54
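        /* memory type encodings for the MT field of MC_SEQ_MISC0, decoded by gmc_v6_0_convert_vram_type() */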
     55#define MC_SEQ_MISC0__MT__MASK   0xf0000000
     56#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
     57#define MC_SEQ_MISC0__MT__DDR2   0x20000000
     58#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
     59#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
     60#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
     61#define MC_SEQ_MISC0__MT__HBM    0x60000000
     62#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
     63
     64static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
     65{
     66	u32 blackout;
     67
     68	gmc_v6_0_wait_for_idle((void *)adev);
     69
     70	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
     71	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
     72		/* Block CPU access */
     73		WREG32(mmBIF_FB_EN, 0);
     74		/* blackout the MC */
     75		blackout = REG_SET_FIELD(blackout,
     76					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
     77		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
     78	}
     79	/* wait for the MC to settle */
     80	udelay(100);
     81
     82}
     83
     84static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
     85{
     86	u32 tmp;
     87
     88	/* unblackout the MC */
     89	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
     90	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
     91	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
     92	/* allow CPU access */
     93	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
     94	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
     95	WREG32(mmBIF_FB_EN, tmp);
     96}
     97
     98static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
     99{
    100	const char *chip_name;
    101	char fw_name[30];
    102	int err;
    103	bool is_58_fw = false;
    104
    105	DRM_DEBUG("\n");
    106
    107	switch (adev->asic_type) {
    108	case CHIP_TAHITI:
    109		chip_name = "tahiti";
    110		break;
    111	case CHIP_PITCAIRN:
    112		chip_name = "pitcairn";
    113		break;
    114	case CHIP_VERDE:
    115		chip_name = "verde";
    116		break;
    117	case CHIP_OLAND:
    118		chip_name = "oland";
    119		break;
    120	case CHIP_HAINAN:
    121		chip_name = "hainan";
    122		break;
    123	default: BUG();
    124	}
    125
    126	/* this memory configuration requires special firmware */
    127	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
    128		is_58_fw = true;
    129
    130	if (is_58_fw)
    131		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
    132	else
    133		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
    134	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
    135	if (err)
    136		goto out;
    137
    138	err = amdgpu_ucode_validate(adev->gmc.fw);
    139
    140out:
    141	if (err) {
    142		dev_err(adev->dev,
    143		       "si_mc: Failed to load firmware \"%s\"\n",
    144		       fw_name);
    145		release_firmware(adev->gmc.fw);
    146		adev->gmc.fw = NULL;
    147	}
    148	return err;
    149}
    150
    151static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
    152{
    153	const __le32 *new_fw_data = NULL;
    154	u32 running;
    155	const __le32 *new_io_mc_regs = NULL;
    156	int i, regs_size, ucode_size;
    157	const struct mc_firmware_header_v1_0 *hdr;
    158
    159	if (!adev->gmc.fw)
    160		return -EINVAL;
    161
    162	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
    163
    164	amdgpu_ucode_print_mc_hdr(&hdr->header);
    165
    166	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
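        	/* io_debug entries are (index, data) pairs of 32-bit words, hence the division by 4 * 2 */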
    167	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
    168	new_io_mc_regs = (const __le32 *)
    169		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
    170	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
    171	new_fw_data = (const __le32 *)
    172		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
    173
    174	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
    175
    176	if (running == 0) {
    177
    178		/* reset the engine and set to writable */
    179		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
    180		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
    181
    182		/* load mc io regs */
    183		for (i = 0; i < regs_size; i++) {
    184			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
    185			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
    186		}
    187		/* load the MC ucode */
    188		for (i = 0; i < ucode_size; i++) {
    189			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
    190		}
    191
    192		/* put the engine back into the active state */
    193		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
    194		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
    195		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
    196
    197		/* wait for training to complete */
    198		for (i = 0; i < adev->usec_timeout; i++) {
    199			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
    200				break;
    201			udelay(1);
    202		}
    203		for (i = 0; i < adev->usec_timeout; i++) {
    204			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
    205				break;
    206			udelay(1);
    207		}
    208
    209	}
    210
    211	return 0;
    212}
    213
    214static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
    215				       struct amdgpu_gmc *mc)
    216{
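        	/* bits 15:0 of MC_VM_FB_LOCATION hold the framebuffer base address in 16 MB units */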
    217	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
    218	base <<= 24;
    219
    220	amdgpu_gmc_vram_location(adev, mc, base);
    221	amdgpu_gmc_gart_location(adev, mc);
    222}
    223
    224static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
    225{
    226	int i, j;
    227
    228	/* Initialize HDP */
    229	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
    230		WREG32((0xb05 + j), 0x00000000);
    231		WREG32((0xb06 + j), 0x00000000);
    232		WREG32((0xb07 + j), 0x00000000);
    233		WREG32((0xb08 + j), 0x00000000);
    234		WREG32((0xb09 + j), 0x00000000);
    235	}
    236	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
    237
    238	if (gmc_v6_0_wait_for_idle((void *)adev)) {
     239		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
    240	}
    241
    242	if (adev->mode_info.num_crtc) {
    243		u32 tmp;
    244
    245		/* Lockout access through VGA aperture*/
    246		tmp = RREG32(mmVGA_HDP_CONTROL);
    247		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
    248		WREG32(mmVGA_HDP_CONTROL, tmp);
    249
    250		/* disable VGA render */
    251		tmp = RREG32(mmVGA_RENDER_CONTROL);
    252		tmp &= ~VGA_VSTATUS_CNTL;
    253		WREG32(mmVGA_RENDER_CONTROL, tmp);
    254	}
    255	/* Update configuration */
    256	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
    257	       adev->gmc.vram_start >> 12);
    258	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
    259	       adev->gmc.vram_end >> 12);
    260	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
    261	       adev->vram_scratch.gpu_addr >> 12);
    262	WREG32(mmMC_VM_AGP_BASE, 0);
    263	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
    264	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
    265
    266	if (gmc_v6_0_wait_for_idle((void *)adev)) {
     267		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
    268	}
    269}
    270
    271static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
    272{
    273
    274	u32 tmp;
    275	int chansize, numchan;
    276	int r;
    277
    278	tmp = RREG32(mmMC_ARB_RAMCFG);
    279	if (tmp & (1 << 11)) {
    280		chansize = 16;
    281	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
    282		chansize = 64;
    283	} else {
    284		chansize = 32;
    285	}
    286	tmp = RREG32(mmMC_SHARED_CHMAP);
    287	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
    288	case 0:
    289	default:
    290		numchan = 1;
    291		break;
    292	case 1:
    293		numchan = 2;
    294		break;
    295	case 2:
    296		numchan = 4;
    297		break;
    298	case 3:
    299		numchan = 8;
    300		break;
    301	case 4:
    302		numchan = 3;
    303		break;
    304	case 5:
    305		numchan = 6;
    306		break;
    307	case 6:
    308		numchan = 10;
    309		break;
    310	case 7:
    311		numchan = 12;
    312		break;
    313	case 8:
    314		numchan = 16;
    315		break;
    316	}
    317	adev->gmc.vram_width = numchan * chansize;
    318	/* size in MB on si */
    319	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
    320	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
    321
    322	if (!(adev->flags & AMD_IS_APU)) {
    323		r = amdgpu_device_resize_fb_bar(adev);
    324		if (r)
    325			return r;
    326	}
    327	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
    328	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
    329	adev->gmc.visible_vram_size = adev->gmc.aper_size;
    330
    331	/* set the gart size */
    332	if (amdgpu_gart_size == -1) {
    333		switch (adev->asic_type) {
    334		case CHIP_HAINAN:    /* no MM engines */
    335		default:
    336			adev->gmc.gart_size = 256ULL << 20;
    337			break;
    338		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
    339		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
    340		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
    341		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
    342			adev->gmc.gart_size = 1024ULL << 20;
    343			break;
    344		}
    345	} else {
    346		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
    347	}
    348
    349	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
    350	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
    351
    352	return 0;
    353}
    354
    355static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
    356					uint32_t vmhub, uint32_t flush_type)
    357{
    358	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
    359}
    360
    361static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
    362					    unsigned vmid, uint64_t pd_addr)
    363{
    364	uint32_t reg;
    365
    366	/* write new base address */
    367	if (vmid < 8)
    368		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
    369	else
    370		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
    371	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
    372
    373	/* bits 0-15 are the VM contexts0-15 */
    374	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
    375
    376	return pd_addr;
    377}
    378
    379static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
    380				uint64_t *addr, uint64_t *flags)
    381{
    382	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
    383}
    384
    385static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
    386				struct amdgpu_bo_va_mapping *mapping,
    387				uint64_t *flags)
    388{
    389	*flags &= ~AMDGPU_PTE_EXECUTABLE;
    390	*flags &= ~AMDGPU_PTE_PRT;
    391}
    392
    393static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
    394					      bool value)
    395{
    396	u32 tmp;
    397
    398	tmp = RREG32(mmVM_CONTEXT1_CNTL);
    399	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    400			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    401	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    402			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    403	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    404			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    405	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    406			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    407	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    408			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    409	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
    410			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    411	WREG32(mmVM_CONTEXT1_CNTL, tmp);
    412}
    413
     414/**
     415 * gmc_v6_0_set_prt - set PRT VM fault
     416 *
     417 * @adev: amdgpu_device pointer
     418 * @enable: enable/disable VM fault handling for PRT
     419 */
    420static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
    421{
    422	u32 tmp;
    423
    424	if (enable && !adev->gmc.prt_warning) {
    425		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
    426		adev->gmc.prt_warning = true;
    427	}
    428
    429	tmp = RREG32(mmVM_PRT_CNTL);
    430	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
    431			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
    432			    enable);
    433	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
    434			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
    435			    enable);
    436	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
    437			    L2_CACHE_STORE_INVALID_ENTRIES,
    438			    enable);
    439	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
    440			    L1_TLB_STORE_INVALID_ENTRIES,
    441			    enable);
    442	WREG32(mmVM_PRT_CNTL, tmp);
    443
    444	if (enable) {
    445		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
    446		uint32_t high = adev->vm_manager.max_pfn -
    447			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
    448
    449		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
    450		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
    451		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
    452		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
    453		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
    454		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
    455		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
    456		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
    457	} else {
    458		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
    459		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
    460		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
    461		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
    462		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
    463		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
    464		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
    465		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
    466	}
    467}
    468
    469static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
    470{
    471	uint64_t table_addr;
    472	u32 field;
    473	int i;
    474
    475	if (adev->gart.bo == NULL) {
    476		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
    477		return -EINVAL;
    478	}
    479	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
    480
    481	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
    482
    483	/* Setup TLB control */
    484	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
    485	       (0xA << 7) |
    486	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
    487	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
    488	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
    489	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
    490	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
    491	/* Setup L2 cache */
    492	WREG32(mmVM_L2_CNTL,
    493	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
    494	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
    495	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
    496	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
    497	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
    498	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
    499	WREG32(mmVM_L2_CNTL2,
    500	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
    501	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
    502
    503	field = adev->vm_manager.fragment_size;
    504	WREG32(mmVM_L2_CNTL3,
    505	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
    506	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
    507	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
    508	/* setup context0 */
    509	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
    510	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
    511	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
    512	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
    513			(u32)(adev->dummy_page_addr >> 12));
    514	WREG32(mmVM_CONTEXT0_CNTL2, 0);
    515	WREG32(mmVM_CONTEXT0_CNTL,
    516	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
    517	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
    518	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
    519
    520	WREG32(0x575, 0);
    521	WREG32(0x576, 0);
    522	WREG32(0x577, 0);
    523
    524	/* empty context1-15 */
    525	/* set vm size, must be a multiple of 4 */
    526	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
    527	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
    528	/* Assign the pt base to something valid for now; the pts used for
    529	 * the VMs are determined by the application and setup and assigned
    530	 * on the fly in the vm part of radeon_gart.c
    531	 */
    532	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
    533		if (i < 8)
    534			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
    535			       table_addr >> 12);
    536		else
    537			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
    538			       table_addr >> 12);
    539	}
    540
    541	/* enable context1-15 */
    542	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
    543	       (u32)(adev->dummy_page_addr >> 12));
    544	WREG32(mmVM_CONTEXT1_CNTL2, 4);
    545	WREG32(mmVM_CONTEXT1_CNTL,
    546	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
    547	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
    548	       ((adev->vm_manager.block_size - 9)
    549	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
    550	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
    551		gmc_v6_0_set_fault_enable_default(adev, false);
    552	else
    553		gmc_v6_0_set_fault_enable_default(adev, true);
    554
    555	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
    556	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
    557		 (unsigned)(adev->gmc.gart_size >> 20),
    558		 (unsigned long long)table_addr);
    559	return 0;
    560}
    561
    562static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
    563{
    564	int r;
    565
    566	if (adev->gart.bo) {
    567		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
    568		return 0;
    569	}
    570	r = amdgpu_gart_init(adev);
    571	if (r)
    572		return r;
    573	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
    574	adev->gart.gart_pte_flags = 0;
    575	return amdgpu_gart_table_vram_alloc(adev);
    576}
    577
    578static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
    579{
    580	/*unsigned i;
    581
    582	for (i = 1; i < 16; ++i) {
    583		uint32_t reg;
    584		if (i < 8)
    585			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
    586		else
    587			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
    588		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
    589	}*/
    590
    591	/* Disable all tables */
    592	WREG32(mmVM_CONTEXT0_CNTL, 0);
    593	WREG32(mmVM_CONTEXT1_CNTL, 0);
    594	/* Setup TLB control */
    595	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
    596	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
    597	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
    598	/* Setup L2 cache */
    599	WREG32(mmVM_L2_CNTL,
    600	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
    601	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
    602	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
    603	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
    604	WREG32(mmVM_L2_CNTL2, 0);
    605	WREG32(mmVM_L2_CNTL3,
    606	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
    607	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
    608}
    609
    610static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
    611				     u32 status, u32 addr, u32 mc_client)
    612{
    613	u32 mc_id;
    614	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
    615	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
    616					PROTECTIONS);
    617	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
    618		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
    619
    620	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
    621			      MEMORY_CLIENT_ID);
    622
    623	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
    624	       protections, vmid, addr,
    625	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
    626			     MEMORY_CLIENT_RW) ?
    627	       "write" : "read", block, mc_client, mc_id);
    628}
    629
    630/*
    631static const u32 mc_cg_registers[] = {
    632	MC_HUB_MISC_HUB_CG,
    633	MC_HUB_MISC_SIP_CG,
    634	MC_HUB_MISC_VM_CG,
    635	MC_XPB_CLK_GAT,
    636	ATC_MISC_CG,
    637	MC_CITF_MISC_WR_CG,
    638	MC_CITF_MISC_RD_CG,
    639	MC_CITF_MISC_VM_CG,
    640	VM_L2_CG,
    641};
    642
    643static const u32 mc_cg_ls_en[] = {
    644	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
    645	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
    646	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
    647	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
    648	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
    649	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
    650	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
    651	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
    652	VM_L2_CG__MEM_LS_ENABLE_MASK,
    653};
    654
    655static const u32 mc_cg_en[] = {
    656	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
    657	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
    658	MC_HUB_MISC_VM_CG__ENABLE_MASK,
    659	MC_XPB_CLK_GAT__ENABLE_MASK,
    660	ATC_MISC_CG__ENABLE_MASK,
    661	MC_CITF_MISC_WR_CG__ENABLE_MASK,
    662	MC_CITF_MISC_RD_CG__ENABLE_MASK,
    663	MC_CITF_MISC_VM_CG__ENABLE_MASK,
    664	VM_L2_CG__ENABLE_MASK,
    665};
    666
    667static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
    668				  bool enable)
    669{
    670	int i;
    671	u32 orig, data;
    672
    673	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
    674		orig = data = RREG32(mc_cg_registers[i]);
    675		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
    676			data |= mc_cg_ls_en[i];
    677		else
    678			data &= ~mc_cg_ls_en[i];
    679		if (data != orig)
    680			WREG32(mc_cg_registers[i], data);
    681	}
    682}
    683
    684static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
    685				    bool enable)
    686{
    687	int i;
    688	u32 orig, data;
    689
    690	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
    691		orig = data = RREG32(mc_cg_registers[i]);
    692		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
    693			data |= mc_cg_en[i];
    694		else
    695			data &= ~mc_cg_en[i];
    696		if (data != orig)
    697			WREG32(mc_cg_registers[i], data);
    698	}
    699}
    700
    701static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
    702				     bool enable)
    703{
    704	u32 orig, data;
    705
    706	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
    707
    708	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
    709		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
    710		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
    711		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
    712		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
    713	} else {
    714		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
    715		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
    716		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
    717		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
    718	}
    719
    720	if (orig != data)
    721		WREG32_PCIE(ixPCIE_CNTL2, data);
    722}
    723
    724static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
    725				     bool enable)
    726{
    727	u32 orig, data;
    728
    729	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
    730
    731	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
    732		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
    733	else
    734		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
    735
    736	if (orig != data)
    737		WREG32(mmHDP_HOST_PATH_CNTL, data);
    738}
    739
    740static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
    741				   bool enable)
    742{
    743	u32 orig, data;
    744
    745	orig = data = RREG32(mmHDP_MEM_POWER_LS);
    746
    747	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
    748		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
    749	else
    750		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
    751
    752	if (orig != data)
    753		WREG32(mmHDP_MEM_POWER_LS, data);
    754}
    755*/
    756
    757static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
    758{
    759	switch (mc_seq_vram_type) {
    760	case MC_SEQ_MISC0__MT__GDDR1:
    761		return AMDGPU_VRAM_TYPE_GDDR1;
    762	case MC_SEQ_MISC0__MT__DDR2:
    763		return AMDGPU_VRAM_TYPE_DDR2;
    764	case MC_SEQ_MISC0__MT__GDDR3:
    765		return AMDGPU_VRAM_TYPE_GDDR3;
    766	case MC_SEQ_MISC0__MT__GDDR4:
    767		return AMDGPU_VRAM_TYPE_GDDR4;
    768	case MC_SEQ_MISC0__MT__GDDR5:
    769		return AMDGPU_VRAM_TYPE_GDDR5;
    770	case MC_SEQ_MISC0__MT__DDR3:
    771		return AMDGPU_VRAM_TYPE_DDR3;
    772	default:
    773		return AMDGPU_VRAM_TYPE_UNKNOWN;
    774	}
    775}
    776
    777static int gmc_v6_0_early_init(void *handle)
    778{
    779	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    780
    781	gmc_v6_0_set_gmc_funcs(adev);
    782	gmc_v6_0_set_irq_funcs(adev);
    783
    784	return 0;
    785}
    786
    787static int gmc_v6_0_late_init(void *handle)
    788{
    789	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    790
    791	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
    792		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
    793	else
    794		return 0;
    795}
    796
    797static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
    798{
    799	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
    800	unsigned size;
    801
    802	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
    803		size = AMDGPU_VBIOS_VGA_ALLOCATION;
    804	} else {
    805		u32 viewport = RREG32(mmVIEWPORT_SIZE);
    806		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
    807			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
    808			4);
    809	}
    810	return size;
    811}
    812
    813static int gmc_v6_0_sw_init(void *handle)
    814{
    815	int r;
    816	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    817
    818	adev->num_vmhubs = 1;
    819
    820	if (adev->flags & AMD_IS_APU) {
    821		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
    822	} else {
    823		u32 tmp = RREG32(mmMC_SEQ_MISC0);
    824		tmp &= MC_SEQ_MISC0__MT__MASK;
    825		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
    826	}
    827
    828	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
    829	if (r)
    830		return r;
    831
    832	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
    833	if (r)
    834		return r;
    835
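        	/* 64 GB VM address space, 2^9-page (2 MB) default fragment size, 1-level page tables, 40-bit MC addresses */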
    836	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
    837
    838	adev->gmc.mc_mask = 0xffffffffffULL;
    839
    840	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
    841	if (r) {
    842		dev_warn(adev->dev, "No suitable DMA available.\n");
    843		return r;
    844	}
    845	adev->need_swiotlb = drm_need_swiotlb(40);
    846
    847	r = gmc_v6_0_init_microcode(adev);
    848	if (r) {
    849		dev_err(adev->dev, "Failed to load mc firmware!\n");
    850		return r;
    851	}
    852
    853	r = gmc_v6_0_mc_init(adev);
    854	if (r)
    855		return r;
    856
    857	amdgpu_gmc_get_vbios_allocations(adev);
    858
    859	r = amdgpu_bo_init(adev);
    860	if (r)
    861		return r;
    862
    863	r = gmc_v6_0_gart_init(adev);
    864	if (r)
    865		return r;
    866
    867	/*
    868	 * number of VMs
    869	 * VMID 0 is reserved for System
    870	 * amdgpu graphics/compute will use VMIDs 1-7
    871	 * amdkfd will use VMIDs 8-15
    872	 */
    873	adev->vm_manager.first_kfd_vmid = 8;
    874	amdgpu_vm_manager_init(adev);
    875
    876	/* base offset of vram pages */
    877	if (adev->flags & AMD_IS_APU) {
    878		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
    879
    880		tmp <<= 22;
    881		adev->vm_manager.vram_base_offset = tmp;
    882	} else {
    883		adev->vm_manager.vram_base_offset = 0;
    884	}
    885
    886	return 0;
    887}
    888
    889static int gmc_v6_0_sw_fini(void *handle)
    890{
    891	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    892
    893	amdgpu_gem_force_release(adev);
    894	amdgpu_vm_manager_fini(adev);
    895	amdgpu_gart_table_vram_free(adev);
    896	amdgpu_bo_fini(adev);
    897	release_firmware(adev->gmc.fw);
    898	adev->gmc.fw = NULL;
    899
    900	return 0;
    901}
    902
    903static int gmc_v6_0_hw_init(void *handle)
    904{
    905	int r;
    906	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    907
    908	gmc_v6_0_mc_program(adev);
    909
    910	if (!(adev->flags & AMD_IS_APU)) {
    911		r = gmc_v6_0_mc_load_microcode(adev);
    912		if (r) {
    913			dev_err(adev->dev, "Failed to load MC firmware!\n");
    914			return r;
    915		}
    916	}
    917
    918	r = gmc_v6_0_gart_enable(adev);
    919	if (r)
    920		return r;
    921
    922	if (amdgpu_emu_mode == 1)
    923		return amdgpu_gmc_vram_checking(adev);
    924	else
    925		return r;
    926}
    927
    928static int gmc_v6_0_hw_fini(void *handle)
    929{
    930	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    931
    932	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
    933	gmc_v6_0_gart_disable(adev);
    934
    935	return 0;
    936}
    937
    938static int gmc_v6_0_suspend(void *handle)
    939{
    940	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    941
    942	gmc_v6_0_hw_fini(adev);
    943
    944	return 0;
    945}
    946
    947static int gmc_v6_0_resume(void *handle)
    948{
    949	int r;
    950	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    951
    952	r = gmc_v6_0_hw_init(adev);
    953	if (r)
    954		return r;
    955
    956	amdgpu_vmid_reset_all(adev);
    957
    958	return 0;
    959}
    960
    961static bool gmc_v6_0_is_idle(void *handle)
    962{
    963	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    964	u32 tmp = RREG32(mmSRBM_STATUS);
    965
    966	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
    967		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
    968		return false;
    969
    970	return true;
    971}
    972
    973static int gmc_v6_0_wait_for_idle(void *handle)
    974{
    975	unsigned i;
    976	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    977
    978	for (i = 0; i < adev->usec_timeout; i++) {
    979		if (gmc_v6_0_is_idle(handle))
    980			return 0;
    981		udelay(1);
    982	}
    983	return -ETIMEDOUT;
    984
    985}
    986
    987static int gmc_v6_0_soft_reset(void *handle)
    988{
    989	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    990	u32 srbm_soft_reset = 0;
    991	u32 tmp = RREG32(mmSRBM_STATUS);
    992
    993	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
    994		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
    995						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
    996
    997	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
    998		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
    999		if (!(adev->flags & AMD_IS_APU))
   1000			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
   1001							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
   1002	}
   1003
   1004	if (srbm_soft_reset) {
   1005		gmc_v6_0_mc_stop(adev);
   1006		if (gmc_v6_0_wait_for_idle(adev)) {
    1007			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
   1008		}
   1009
   1010
   1011		tmp = RREG32(mmSRBM_SOFT_RESET);
   1012		tmp |= srbm_soft_reset;
   1013		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
   1014		WREG32(mmSRBM_SOFT_RESET, tmp);
   1015		tmp = RREG32(mmSRBM_SOFT_RESET);
   1016
   1017		udelay(50);
   1018
   1019		tmp &= ~srbm_soft_reset;
   1020		WREG32(mmSRBM_SOFT_RESET, tmp);
   1021		tmp = RREG32(mmSRBM_SOFT_RESET);
   1022
   1023		udelay(50);
   1024
   1025		gmc_v6_0_mc_resume(adev);
   1026		udelay(50);
   1027	}
   1028
   1029	return 0;
   1030}
   1031
   1032static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
   1033					     struct amdgpu_irq_src *src,
   1034					     unsigned type,
   1035					     enum amdgpu_interrupt_state state)
   1036{
   1037	u32 tmp;
   1038	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
   1039		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
   1040		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
   1041		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
   1042		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
   1043		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
   1044
   1045	switch (state) {
   1046	case AMDGPU_IRQ_STATE_DISABLE:
   1047		tmp = RREG32(mmVM_CONTEXT0_CNTL);
   1048		tmp &= ~bits;
   1049		WREG32(mmVM_CONTEXT0_CNTL, tmp);
   1050		tmp = RREG32(mmVM_CONTEXT1_CNTL);
   1051		tmp &= ~bits;
   1052		WREG32(mmVM_CONTEXT1_CNTL, tmp);
   1053		break;
   1054	case AMDGPU_IRQ_STATE_ENABLE:
   1055		tmp = RREG32(mmVM_CONTEXT0_CNTL);
   1056		tmp |= bits;
   1057		WREG32(mmVM_CONTEXT0_CNTL, tmp);
   1058		tmp = RREG32(mmVM_CONTEXT1_CNTL);
   1059		tmp |= bits;
   1060		WREG32(mmVM_CONTEXT1_CNTL, tmp);
   1061		break;
   1062	default:
   1063		break;
   1064	}
   1065
   1066	return 0;
   1067}
   1068
   1069static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
   1070				      struct amdgpu_irq_src *source,
   1071				      struct amdgpu_iv_entry *entry)
   1072{
   1073	u32 addr, status;
   1074
   1075	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
   1076	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
   1077	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
   1078
   1079	if (!addr && !status)
   1080		return 0;
   1081
   1082	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
   1083		gmc_v6_0_set_fault_enable_default(adev, false);
   1084
   1085	if (printk_ratelimit()) {
   1086		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
   1087			entry->src_id, entry->src_data[0]);
   1088		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
   1089			addr);
   1090		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
   1091			status);
   1092		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
   1093	}
   1094
   1095	return 0;
   1096}
   1097
   1098static int gmc_v6_0_set_clockgating_state(void *handle,
   1099					  enum amd_clockgating_state state)
   1100{
   1101	return 0;
   1102}
   1103
   1104static int gmc_v6_0_set_powergating_state(void *handle,
   1105					  enum amd_powergating_state state)
   1106{
   1107	return 0;
   1108}
   1109
   1110static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
   1111	.name = "gmc_v6_0",
   1112	.early_init = gmc_v6_0_early_init,
   1113	.late_init = gmc_v6_0_late_init,
   1114	.sw_init = gmc_v6_0_sw_init,
   1115	.sw_fini = gmc_v6_0_sw_fini,
   1116	.hw_init = gmc_v6_0_hw_init,
   1117	.hw_fini = gmc_v6_0_hw_fini,
   1118	.suspend = gmc_v6_0_suspend,
   1119	.resume = gmc_v6_0_resume,
   1120	.is_idle = gmc_v6_0_is_idle,
   1121	.wait_for_idle = gmc_v6_0_wait_for_idle,
   1122	.soft_reset = gmc_v6_0_soft_reset,
   1123	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
   1124	.set_powergating_state = gmc_v6_0_set_powergating_state,
   1125};
   1126
   1127static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
   1128	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
   1129	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
   1130	.set_prt = gmc_v6_0_set_prt,
   1131	.get_vm_pde = gmc_v6_0_get_vm_pde,
   1132	.get_vm_pte = gmc_v6_0_get_vm_pte,
   1133	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
   1134};
   1135
   1136static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
   1137	.set = gmc_v6_0_vm_fault_interrupt_state,
   1138	.process = gmc_v6_0_process_interrupt,
   1139};
   1140
   1141static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
   1142{
   1143	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
   1144}
   1145
   1146static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
   1147{
   1148	adev->gmc.vm_fault.num_types = 1;
   1149	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
   1150}
   1151
   1152const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
   1153{
   1154	.type = AMD_IP_BLOCK_TYPE_GMC,
   1155	.major = 6,
   1156	.minor = 0,
   1157	.rev = 0,
   1158	.funcs = &gmc_v6_0_ip_funcs,
   1159};