cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmhub_v2_3.c (23225B)


      1/*
      2 * Copyright 2019 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include "amdgpu.h"
     25#include "mmhub_v2_3.h"
     26
     27#include "mmhub/mmhub_2_3_0_offset.h"
     28#include "mmhub/mmhub_2_3_0_sh_mask.h"
     29#include "mmhub/mmhub_2_3_0_default.h"
     30#include "navi10_enum.h"
     31
     32#include "soc15_common.h"
     33
     34static const char *mmhub_client_ids_vangogh[][2] = {
     35	[0][0] = "MP0",
     36	[1][0] = "MP1",
     37	[2][0] = "DCEDMC",
     38	[3][0] = "DCEVGA",
     39	[13][0] = "UTCL2",
     40	[26][0] = "OSS",
     41	[27][0] = "HDP",
     42	[28][0] = "VCN",
     43	[29][0] = "VCNU",
     44	[30][0] = "JPEG",
     45	[0][1] = "MP0",
     46	[1][1] = "MP1",
     47	[2][1] = "DCEDMC",
     48	[3][1] = "DCEVGA",
     49	[4][1] = "DCEDWB",
     50	[5][1] = "XDP",
     51	[26][1] = "OSS",
     52	[27][1] = "HDP",
     53	[28][1] = "VCN",
     54	[29][1] = "VCNU",
     55	[30][1] = "JPEG",
     56};
     57
     58static uint32_t mmhub_v2_3_get_invalidate_req(unsigned int vmid,
     59					      uint32_t flush_type)
     60{
     61	u32 req = 0;
     62
     63	/* invalidate using legacy mode on vmid*/
     64	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
     65			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
     66	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
     67	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
     68	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
     69	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
     70	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
     71	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
     72	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
     73			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
     74
     75	return req;
     76}
     77
     78static void
     79mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
     80					     uint32_t status)
     81{
     82	uint32_t cid, rw;
     83	const char *mmhub_cid = NULL;
     84
     85	cid = REG_GET_FIELD(status,
     86			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
     87	rw = REG_GET_FIELD(status,
     88			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);
     89
     90	dev_err(adev->dev,
     91		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
     92		status);
     93	switch (adev->ip_versions[MMHUB_HWIP][0]) {
     94	case IP_VERSION(2, 3, 0):
     95	case IP_VERSION(2, 4, 0):
     96	case IP_VERSION(2, 4, 1):
     97		mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
     98		break;
     99	default:
    100		mmhub_cid = NULL;
    101		break;
    102	}
    103	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
    104		mmhub_cid ? mmhub_cid : "unknown", cid);
    105	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
    106		REG_GET_FIELD(status,
    107		MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
    108	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
    109		REG_GET_FIELD(status,
    110		MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
    111	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
    112		REG_GET_FIELD(status,
    113		MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
    114	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
    115		REG_GET_FIELD(status,
    116		MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
    117	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
    118}
    119
    120static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev,
    121					uint32_t vmid,
    122					uint64_t page_table_base)
    123{
    124	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    125
    126	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
    127			    hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
    128
    129	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
    130			    hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
    131}
    132
    133static void mmhub_v2_3_init_gart_aperture_regs(struct amdgpu_device *adev)
    134{
    135	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
    136
    137	mmhub_v2_3_setup_vm_pt_regs(adev, 0, pt_base);
    138
    139	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
    140		     (u32)(adev->gmc.gart_start >> 12));
    141	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
    142		     (u32)(adev->gmc.gart_start >> 44));
    143
    144	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
    145		     (u32)(adev->gmc.gart_end >> 12));
    146	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
    147		     (u32)(adev->gmc.gart_end >> 44));
    148}
    149
    150static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
    151{
    152	uint64_t value;
    153	uint32_t tmp;
    154
    155	/* Disable AGP. */
    156	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
    157	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
    158	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
    159
    160	/* Program the system aperture low logical page number. */
    161	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
    162		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
    163	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
    164		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
    165
    166	/* Set default page address. */
    167	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
    168	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
    169		     (u32)(value >> 12));
    170	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
    171		     (u32)(value >> 44));
    172
    173	/* Program "protection fault". */
    174	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
    175		     (u32)(adev->dummy_page_addr >> 12));
    176	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
    177		     (u32)((u64)adev->dummy_page_addr >> 44));
    178
    179	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
    180	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
    181			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
    182	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
    183}
    184
    185static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
    186{
    187	uint32_t tmp;
    188
    189	/* Setup TLB control */
    190	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
    191
    192	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
    193	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
    194	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
    195			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
    196	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
    197			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
    198	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
    199			    MTYPE, MTYPE_UC); /* UC, uncached */
    200
    201	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
    202}
    203
    204static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev)
    205{
    206	uint32_t tmp;
    207
    208	/* Setup L2 cache */
    209	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
    210	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
    211	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
    212	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
    213			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
    214	/* XXX for emulation, Refer to closed source code.*/
    215	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
    216			    0);
    217	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
    218	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
    219	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
    220	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
    221
    222	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
    223	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
    224	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
    225	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
    226
    227	tmp = mmMMVM_L2_CNTL3_DEFAULT;
    228	if (adev->gmc.translate_further) {
    229		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
    230		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
    231				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
    232	} else {
    233		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
    234		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
    235				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
    236	}
    237	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
    238
    239	tmp = mmMMVM_L2_CNTL4_DEFAULT;
    240	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
    241	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
    242	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);
    243
    244	tmp = mmMMVM_L2_CNTL5_DEFAULT;
    245	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
    246	WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp);
    247}
    248
    249static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev)
    250{
    251	uint32_t tmp;
    252
    253	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
    254	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
    255	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
    256	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
    257			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
    258	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
    259}
    260
/*
 * Disable the CONTEXT1 identity aperture: the low address is set above
 * the high address (0xF_FFFFFFFF vs 0), so the aperture never matches,
 * and the physical offset is cleared.
 */
static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}
    280
    281static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
    282{
    283	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    284	int i;
    285	uint32_t tmp;
    286
    287	for (i = 0; i <= 14; i++) {
    288		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i);
    289		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
    290		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
    291				    adev->vm_manager.num_level);
    292		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    293				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    294		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    295				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
    296				    1);
    297		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    298				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    299		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    300				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    301		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    302				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    303		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    304				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    305		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    306				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
    307		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    308				    PAGE_TABLE_BLOCK_SIZE,
    309				    adev->vm_manager.block_size - 9);
    310		/* Send no-retry XNACK on fault to suppress VM fault storm. */
    311		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
    312				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
    313				    !adev->gmc.noretry);
    314		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
    315				    i * hub->ctx_distance, tmp);
    316		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
    317				    i * hub->ctx_addr_distance, 0);
    318		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
    319				    i * hub->ctx_addr_distance, 0);
    320		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
    321				    i * hub->ctx_addr_distance,
    322				    lower_32_bits(adev->vm_manager.max_pfn - 1));
    323		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
    324				    i * hub->ctx_addr_distance,
    325				    upper_32_bits(adev->vm_manager.max_pfn - 1));
    326	}
    327
    328	hub->vm_cntx_cntl = tmp;
    329}
    330
    331static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev)
    332{
    333	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    334	unsigned i;
    335
    336	for (i = 0; i < 18; ++i) {
    337		WREG32_SOC15_OFFSET(MMHUB, 0,
    338				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
    339				    i * hub->eng_addr_distance, 0xffffffff);
    340		WREG32_SOC15_OFFSET(MMHUB, 0,
    341				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
    342				    i * hub->eng_addr_distance, 0x1f);
    343	}
    344}
    345
/*
 * Bring up the GART on the MMHUB: program the apertures, TLB and L2
 * cache, then the per-VMID contexts and invalidation engines.  Always
 * returns 0.
 */
static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v2_3_init_gart_aperture_regs(adev);
	mmhub_v2_3_init_system_aperture_regs(adev);
	mmhub_v2_3_init_tlb_regs(adev);
	mmhub_v2_3_init_cache_regs(adev);

	mmhub_v2_3_enable_system_domain(adev);
	mmhub_v2_3_disable_identity_aperture(adev);
	mmhub_v2_3_setup_vmid_config(adev);
	mmhub_v2_3_program_invalidation(adev);

	return 0;
}
    373
    374static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev)
    375{
    376	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    377	u32 tmp;
    378	u32 i;
    379
    380	/* Disable all tables */
    381	for (i = 0; i < AMDGPU_NUM_VMID; i++)
    382		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
    383				    i * hub->ctx_distance, 0);
    384
    385	/* Setup TLB control */
    386	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
    387	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
    388	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
    389			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
    390	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
    391
    392	/* Setup L2 cache */
    393	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
    394	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
    395	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
    396	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
    397}
    398
/**
 * mmhub_v2_3_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page; false additionally
 *         sets the CRASH_ON_*_FAULT bits so faults are fatal
 */
static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev,
						bool value)
{
	u32 tmp;
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	/* Apply @value to every per-fault-type ENABLE_DEFAULT bit. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		/* Not redirecting: make both retry and no-retry faults crash. */
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
    441
/* VM hub callbacks handed to the generic GMC code via mmhub_v2_3_init(). */
static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v2_3_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v2_3_get_invalidate_req,
};
    446
/*
 * Fill in the amdgpu_vmhub bookkeeping for this MMHUB: register offsets
 * used by the generic GMC code, strides between per-context/per-engine
 * register groups, the context fault interrupt mask, and the callbacks.
 */
static void mmhub_v2_3_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	/* Absolute offsets of the registers the GMC/flush code accesses. */
	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	/* Register strides between consecutive contexts / engines. */
	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	/* Interrupt-enable mask for all fault types in CONTEXTn_CNTL. */
	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &mmhub_v2_3_vmhub_funcs;
}
    489
    490static void
    491mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
    492					    bool enable)
    493{
    494	uint32_t def, data, def1, data1;
    495
    496	def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
    497	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
    498
    499	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
    500		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
    501		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
    502		           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
    503		           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
    504		           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
    505		           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
    506		           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
    507
    508	} else {
    509		data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
    510		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
    511			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
    512			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
    513			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
    514			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
    515			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
    516	}
    517
    518	if (def != data)
    519		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
    520	if (def1 != data1)
    521		WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
    522}
    523
    524static void
    525mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
    526					   bool enable)
    527{
    528	uint32_t def, data, def1, data1, def2, data2;
    529
    530	def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
    531	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
    532	def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
    533
    534	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
    535		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
    536		data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
    537			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
    538			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
    539			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
    540			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
    541		data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
    542			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
    543			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
    544			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
    545			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
    546	} else {
    547		data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
    548		data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
    549			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
    550			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
    551			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
    552			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
    553		data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
    554			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
    555			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
    556			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
    557			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
    558	}
    559
    560	if (def != data)
    561		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
    562	if (def1 != data1)
    563		WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
    564	if (def2 != data2)
    565		WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
    566}
    567
    568static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
    569				      enum amd_clockgating_state state)
    570{
    571	if (amdgpu_sriov_vf(adev))
    572		return 0;
    573
    574	mmhub_v2_3_update_medium_grain_clock_gating(adev,
    575				state == AMD_CG_STATE_GATE);
    576	mmhub_v2_3_update_medium_grain_light_sleep(adev,
    577				state == AMD_CG_STATE_GATE);
    578
    579	return 0;
    580}
    581
/*
 * Report which MC clockgating features are currently active by OR-ing
 * AMD_CG_SUPPORT_MC_MGCG / AMD_CG_SUPPORT_MC_LS into @flags, based on
 * the override/disable bits read back from the hardware.
 */
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1, data2, data3;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;
	/* NOTE(review): no early return under SR-IOV, so the register reads
	 * below still execute and may set flag bits — confirm intentional. */

	data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
	data1  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
	data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

	/* AMD_CG_SUPPORT_MC_MGCG: active when no CG-disable/override is set. */
	if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
		&& !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
			*flags |= AMD_CG_SUPPORT_MC_MGCG;
	}

	/* AMD_CG_SUPPORT_MC_LS: active when no LS-override bit is set. */
	if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
		&& !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
				DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
				DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
				DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
				DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
		&& !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
				DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
				DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
				DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
				DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
    619
/* MMHUB 2.3 ops table exported to the amdgpu core (see mmhub_v2_3.h). */
const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
	.init = mmhub_v2_3_init,
	.gart_enable = mmhub_v2_3_gart_enable,
	.set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
	.gart_disable = mmhub_v2_3_gart_disable,
	.set_clockgating = mmhub_v2_3_set_clockgating,
	.get_clockgating = mmhub_v2_3_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_3_setup_vm_pt_regs,
};