cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gfxhub_v1_0.c (16381B)


/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "gc/gc_9_0_default.h"
#include "vega10_enum.h"

#include "soc15_common.h"

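/**
 * gfxhub_v1_0_get_mc_fb_offset - read the MC framebuffer offset
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the MC_VM_FB_OFFSET register value shifted left by 24 bits,
 * i.e. converted from 16MB units into a byte address.
 */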
static u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}

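/**
 * gfxhub_v1_0_setup_vm_pt_regs - program the page table base for a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: context/VMID whose page table base registers are written
 * @page_table_base: GPU address of the page directory
 */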
static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

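/**
 * gfxhub_v1_0_init_gart_aperture_regs - program the VMID0 (GART) aperture
 *
 * @adev: amdgpu_device pointer
 *
 * Writes the VMID0 page table base and the start/end addresses of the
 * kernel context aperture.
 */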
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both vram and system memory (gart).
	 */
	if (adev->gmc.pdb0_bo) {
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				(u32)(adev->gmc.fb_start >> 12));
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				(u32)(adev->gmc.fb_start >> 44));

		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				(u32)(adev->gmc.gart_end >> 12));
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				(u32)(adev->gmc.gart_end >> 44));
	} else {
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				(u32)(adev->gmc.gart_start >> 12));
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				(u32)(adev->gmc.gart_start >> 44));

		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				(u32)(adev->gmc.gart_end >> 12));
		WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				(u32)(adev->gmc.gart_end >> 44));
	}
}

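/**
 * gfxhub_v1_0_init_system_aperture_regs - program the AGP and system apertures
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the AGP BAR, the system aperture range, the default page address
 * and the protection fault default address.
 */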
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	/* Program the AGP BAR */
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
		/* Program the system aperture low logical page number. */
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that prevents it from using
			 * vram above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
			 * workaround, raise the system aperture high address
			 * by 1 to avoid the VM fault and hardware hang.
			 */
			WREG32_SOC15_RLC(GC, 0,
					 mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					 max((adev->gmc.fb_end >> 18) + 0x1,
					     adev->gmc.agp_end >> 18));
		else
			WREG32_SOC15_RLC(
				GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
			       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	}

	/* When vram is squeezed into the GART aperture, the FB and AGP
	 * apertures are not used. Disable them.
	 */
	if (adev->gmc.pdb0_bo) {
		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
		WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
		WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
	}
}

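/**
 * gfxhub_v1_0_init_tlb_regs - program the L1 TLB control register
 *
 * @adev: amdgpu_device pointer
 */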
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

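/**
 * gfxhub_v1_0_init_cache_regs - program the VM L2 cache control registers
 *
 * @adev: amdgpu_device pointer
 */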
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL2, tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);

	tmp = mmVM_L2_CNTL4_DEFAULT;
	if (adev->gmc.xgmi.connected_to_cpu) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	}
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}

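/**
 * gfxhub_v1_0_enable_system_domain - enable the VMID0 (system) context
 *
 * @adev: amdgpu_device pointer
 */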
static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
			adev->gmc.vmid0_page_table_depth);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
			adev->gmc.vmid0_page_table_block_size);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}

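/**
 * gfxhub_v1_0_disable_identity_aperture - disable the context1 identity aperture
 *
 * @adev: amdgpu_device pointer
 */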
static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}

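/**
 * gfxhub_v1_0_setup_vmid_config - program VM contexts 1-15 for user VMIDs
 *
 * @adev: amdgpu_device pointer
 */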
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm.
		 * On Aldebaran, XNACK can be enabled in the SQ per-process.
		 * Retry faults need to be enabled for that to work.
		 */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry ||
				    adev->asic_type == CHIP_ALDEBARAN);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

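/**
 * gfxhub_v1_0_program_invalidation - initialize the 18 VM invalidation engines
 *
 * @adev: amdgpu_device pointer
 */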
static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

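/**
 * gfxhub_v1_0_gart_enable - enable GART address translation on the GFX hub
 *
 * @adev: amdgpu_device pointer
 */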
static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	gfxhub_v1_0_init_gart_aperture_regs(adev);
	gfxhub_v1_0_init_system_aperture_regs(adev);
	gfxhub_v1_0_init_tlb_regs(adev);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_0_init_cache_regs(adev);

	gfxhub_v1_0_enable_system_domain(adev);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_0_disable_identity_aperture(adev);
	gfxhub_v1_0_setup_vmid_config(adev);
	gfxhub_v1_0_program_invalidation(adev);

	return 0;
}

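/**
 * gfxhub_v1_0_gart_disable - disable GART address translation on the GFX hub
 *
 * @adev: amdgpu_device pointer
 */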
static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	if (amdgpu_sriov_vf(adev))
		/* Avoid writing to GMC registers */
		return;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				0);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, 0);
}

/**
 * gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

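/**
 * gfxhub_v1_0_init - fill in the GFX hub register offsets and distances
 *
 * @adev: amdgpu_device pointer
 */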
static void gfxhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
	.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_0_gart_enable,
	.gart_disable = gfxhub_v1_0_gart_disable,
	.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
	.init = gfxhub_v1_0_init,
	.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
};