cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gfxhub_v2_0.c (17546B)


/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "gfxhub_v2_0.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "gc/gc_10_1_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

static const char *gfxhub_client_ids[] = {
	"CB/DB",
	"Reserved",
	"GE1",
	"GE2",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"Reserved",
	"SDMA0",
	"SDMA1",
	"GCR",
	"SDMA2",
	"SDMA3",
};

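/*
 * Build the GCVM_INVALIDATE_ENG0_REQ value for a legacy, per-VMID flush
 * that invalidates the L1 TLB entries and all L2 PTE/PDE cache levels.
 */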
static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
					       uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

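/* Decode and log the fields of a GCVM_L2_PROTECTION_FAULT_STATUS value. */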
static void
gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					     uint32_t status)
{
	u32 cid = REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, CID);

	dev_err(adev->dev,
		"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
		cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}

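/*
 * GCMC_VM_FB_LOCATION_BASE and GCMC_VM_FB_OFFSET are programmed in 16MB
 * units; shift by 24 to convert them to byte addresses.
 */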
static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);

	base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}

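/* Program the root page table address of @vmid on the GFX hub. */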
static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

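/*
 * Point VM context 0 at the GART page directory and program the
 * address range (gart_start..gart_end) that it translates.
 */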
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

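/*
 * Program the AGP aperture, the system aperture and the default page
 * address (the host programs these for SR-IOV VFs), plus the address
 * that faulting accesses are redirected to.
 */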
static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	if (!amdgpu_sriov_vf(adev)) {
		/* Program the AGP BAR */
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));
	}

	/* Program "protection fault". */
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}

static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These regs are not accessible for VF, PF will program these in SRIOV */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);

	tmp = mmGCVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);

	tmp = mmGCVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);

	tmp = mmGCVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp);
}

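/* Enable VM context 0, which the driver uses for GART/system mappings. */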
static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}

static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}

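/*
 * Configure VM contexts 1-15 (the per-process VMIDs): page table depth
 * and block size, the address range they translate, and the default
 * fault handling.
 */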
static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

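/*
 * Initialize the address range registers of all 18 invalidation engines
 * to cover the full address space.
 */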
static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	gfxhub_v2_0_init_gart_aperture_regs(adev);
	gfxhub_v2_0_init_system_aperture_regs(adev);
	gfxhub_v2_0_init_tlb_regs(adev);
	gfxhub_v2_0_init_cache_regs(adev);

	gfxhub_v2_0_enable_system_domain(adev);
	gfxhub_v2_0_disable_identity_aperture(adev);
	gfxhub_v2_0_setup_vmid_config(adev);
	gfxhub_v2_0_program_invalidation(adev);

	return 0;
}

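/*
 * Disable all VM contexts and the L1 TLB; on bare metal also disable
 * the L2 cache.
 */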
static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);

	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
	}
}

/**
 * gfxhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
	.get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
};

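/*
 * Fill in the amdgpu_vmhub description for the GFX hub: register
 * offsets, the distances between per-context and per-engine registers,
 * and the fault interrupt mask.
 */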
static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ -
		mmGCVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
}

const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
	.get_fb_location = gfxhub_v2_0_get_fb_location,
	.get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
	.gart_enable = gfxhub_v2_0_gart_enable,
	.gart_disable = gfxhub_v2_0_gart_disable,
	.set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
	.init = gfxhub_v2_0_init,
};