cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gfxhub_v2_1.c (20619B)


/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "gfxhub_v2_1.h"

#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "gc/gc_10_3_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP		0x16f8
#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX	0

static const char *gfxhub_client_ids[] = {
	"CB/DB",
	"Reserved",
	"GE1",
	"GE2",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"Reserved",
	"SDMA0",
	"SDMA1",
	"GCR",
	"SDMA2",
	"SDMA3",
};

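/* Build a legacy (per-VMID) GCVM_INVALIDATE_ENG0_REQ value that flushes the
 * L1 PTE caches and the L2 PTE/PDE caches for @vmid with the given flush type.
 */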
static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid,
					       uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

static void
gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
					     uint32_t status)
{
	u32 cid = REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, CID);

	dev_err(adev->dev,
		"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
		cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}

static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);

	base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}

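/* Program the CONTEXT<vmid> page table base address registers with the given
 * page directory address.
 */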
static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

static void gfxhub_v2_1_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v2_1_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	/* Program the AGP BAR */
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}

static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);

	tmp = mmGCVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);

	tmp = mmGCVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);

	tmp = mmGCVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp);
}

static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}

static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}

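/* Configure the per-process VM contexts (CONTEXT1..15): page table depth and
 * block size, fault handling defaults, and the address range each context
 * covers.
 */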
static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * GCMC_VM_FB_LOCATION_BASE/TOP are NULL for the VF because they
		 * are VF copy registers that the VBIOS post does not program;
		 * the SRIOV driver needs to program them itself.
		 */
		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	gfxhub_v2_1_init_gart_aperture_regs(adev);
	gfxhub_v2_1_init_system_aperture_regs(adev);
	gfxhub_v2_1_init_tlb_regs(adev);
	gfxhub_v2_1_init_cache_regs(adev);

	gfxhub_v2_1_enable_system_domain(adev);
	gfxhub_v2_1_disable_identity_aperture(adev);
	gfxhub_v2_1_setup_vmid_config(adev);
	gfxhub_v2_1_program_invalidation(adev);

	return 0;
}

static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
}

/**
 * gfxhub_v2_1_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
	.print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status,
	.get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
};

static void gfxhub_v2_1_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ -
		mmGCVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
}

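/* Read the XGMI LFB configuration to fill in the number of physical nodes,
 * this node's id, and the per-node segment size; PF_MAX_REGION == 0 means
 * XGMI is disabled.
 */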
static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
	u32 max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
	u32 max_num_physical_nodes   = 0;
	u32 max_physical_node_id     = 0;

	switch (adev->ip_versions[XGMI_HWIP][0]) {
	case IP_VERSION(4, 8, 0):
		max_num_physical_nodes   = 4;
		max_physical_node_id     = 3;
		break;
	default:
		return -EINVAL;
	}

	/* PF_MAX_REGION=0 means xgmi is disabled */
	if (max_region) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;
		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
			RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_SIZE),
			GCMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	}

	return 0;
}

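/* Mirror the shader array (SA) harvest configuration from the eFuse and VBIOS
 * settings into mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP for GC 10.3.1 and
 * 10.3.3 parts.
 */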
static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
{
	int i;
	u32 tmp = 0, disabled_sa = 0;
	u32 efuse_setting, vbios_setting;

	u32 max_sa_mask = amdgpu_gfx_create_bitmask(
		adev->gfx.config.max_sh_per_se *
		adev->gfx.config.max_shader_engines);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
		/* Get SA disabled bitmap from eFuse setting */
		efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
		efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
		efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;

		/* Get SA disabled bitmap from VBIOS setting */
		vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
		vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
		vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;

		disabled_sa |= efuse_setting | vbios_setting;
		/* Make sure not to report harvested SAs beyond the max SA count */
		disabled_sa &= max_sa_mask;

		for (i = 0; disabled_sa > 0; i++) {
			if (disabled_sa & 1)
				tmp |= 0x3 << (i * 2);
			disabled_sa >>= 1;
		}
		disabled_sa = tmp;

		WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
		break;
	default:
		break;
	}
}

const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
	.get_fb_location = gfxhub_v2_1_get_fb_location,
	.get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs,
	.gart_enable = gfxhub_v2_1_gart_enable,
	.gart_disable = gfxhub_v2_1_gart_disable,
	.set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
	.init = gfxhub_v2_1_init,
	.get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
	.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
};