cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmhub_v1_0.c (28080B)


      1/*
      2 * Copyright 2016 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23#include "amdgpu.h"
     24#include "amdgpu_ras.h"
     25#include "mmhub_v1_0.h"
     26
     27#include "mmhub/mmhub_1_0_offset.h"
     28#include "mmhub/mmhub_1_0_sh_mask.h"
     29#include "mmhub/mmhub_1_0_default.h"
     30#include "vega10_enum.h"
     31#include "soc15.h"
     32#include "soc15_common.h"
     33
     34#define mmDAGB0_CNTL_MISC2_RV 0x008f
     35#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
     36
     37static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
     38{
     39	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
     40	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
     41
     42	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
     43	base <<= 24;
     44
     45	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
     46	top <<= 24;
     47
     48	adev->gmc.fb_start = base;
     49	adev->gmc.fb_end = top;
     50
     51	return base;
     52}
     53
     54static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
     55				uint64_t page_table_base)
     56{
     57	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
     58
     59	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
     60			    hub->ctx_addr_distance * vmid,
     61			    lower_32_bits(page_table_base));
     62
     63	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
     64			    hub->ctx_addr_distance * vmid,
     65			    upper_32_bits(page_table_base));
     66}
     67
     68static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
     69{
     70	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
     71
     72	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
     73
     74	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
     75		     (u32)(adev->gmc.gart_start >> 12));
     76	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
     77		     (u32)(adev->gmc.gart_start >> 44));
     78
     79	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
     80		     (u32)(adev->gmc.gart_end >> 12));
     81	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
     82		     (u32)(adev->gmc.gart_end >> 44));
     83}
     84
/*
 * mmhub_v1_0_init_system_aperture_regs - program AGP BAR and system aperture.
 *
 * Writes the AGP window, the system aperture low/high bounds, the default
 * page address and the protection-fault default address. Under SRIOV, only
 * the aperture registers are written; the function returns early before the
 * default/fault address programming.
 */
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* NOTE(review): rest is skipped under SRIOV — presumably host-owned. */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
    134
    135static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
    136{
    137	uint32_t tmp;
    138
    139	/* Setup TLB control */
    140	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
    141
    142	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
    143	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
    144	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
    145			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
    146	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
    147			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
    148	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
    149			    MTYPE, MTYPE_UC);/* XXX for emulation. */
    150	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
    151
    152	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
    153}
    154
    155static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
    156{
    157	uint32_t tmp;
    158
    159	if (amdgpu_sriov_vf(adev))
    160		return;
    161
    162	/* Setup L2 cache */
    163	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
    164	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
    165	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
    166	/* XXX for emulation, Refer to closed source code.*/
    167	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
    168			    0);
    169	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
    170	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
    171	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
    172	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
    173
    174	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
    175	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
    176	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
    177	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
    178
    179	if (adev->gmc.translate_further) {
    180		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
    181		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
    182				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
    183	} else {
    184		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
    185		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
    186				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
    187	}
    188	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
    189
    190	tmp = mmVM_L2_CNTL4_DEFAULT;
    191	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
    192	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
    193	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
    194}
    195
    196static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
    197{
    198	uint32_t tmp;
    199
    200	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
    201	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
    202	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
    203	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
    204			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
    205	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
    206}
    207
    208static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
    209{
    210	if (amdgpu_sriov_vf(adev))
    211		return;
    212
    213	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
    214		     0XFFFFFFFF);
    215	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
    216		     0x0000000F);
    217
    218	WREG32_SOC15(MMHUB, 0,
    219		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
    220	WREG32_SOC15(MMHUB, 0,
    221		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
    222
    223	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
    224		     0);
    225	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
    226		     0);
    227}
    228
/*
 * mmhub_v1_0_setup_vmid_config - configure VM contexts 1..15.
 *
 * Each user VMID gets the same control settings (fault-enable defaults,
 * page table depth and block size) and a full address range of
 * [0, max_pfn - 1]. When translate_further is set, one page table level
 * is dropped; otherwise the block size is reduced by 9 bits.
 */
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	/* Contexts 1..15 (i is an offset from VM_CONTEXT1). */
	for (i = 0; i <= 14; i++) {
		/*
		 * NOTE(review): the read offsets by i while the write below
		 * offsets by i * ctx_distance; equivalent only when
		 * ctx_distance == 1 — confirm against the register map.
		 */
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		/* Range covers the whole VM address space: 0 .. max_pfn-1. */
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
    284
    285static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
    286{
    287	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    288	unsigned i;
    289
    290	for (i = 0; i < 18; ++i) {
    291		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
    292				    i * hub->eng_addr_distance, 0xffffffff);
    293		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
    294				    i * hub->eng_addr_distance, 0x1f);
    295	}
    296}
    297
    298static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
    299				bool enable)
    300{
    301	if (amdgpu_sriov_vf(adev))
    302		return;
    303
    304	if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
    305		amdgpu_dpm_set_powergating_by_smu(adev,
    306						  AMD_IP_BLOCK_TYPE_GMC,
    307						  enable);
    308}
    309
/*
 * mmhub_v1_0_gart_enable - bring up the MMHUB for GART/VM operation.
 *
 * Runs the full register init sequence in order: GART aperture, system
 * aperture, TLB, L2 cache, system domain (context 0), identity aperture,
 * per-VMID config and invalidation engine ranges. Always returns 0.
 */
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}
    337
    338static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
    339{
    340	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    341	u32 tmp;
    342	u32 i;
    343
    344	/* Disable all tables */
    345	for (i = 0; i < AMDGPU_NUM_VMID; i++)
    346		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
    347				    i * hub->ctx_distance, 0);
    348
    349	/* Setup TLB control */
    350	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
    351	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
    352	tmp = REG_SET_FIELD(tmp,
    353				MC_VM_MX_L1_TLB_CNTL,
    354				ENABLE_ADVANCED_DRIVER_MODEL,
    355				0);
    356	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
    357
    358	if (!amdgpu_sriov_vf(adev)) {
    359		/* Setup L2 cache */
    360		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
    361		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
    362		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
    363		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
    364	}
    365}
    366
/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * Sets all the *_PROTECTION_FAULT_ENABLE_DEFAULT bits to @value. When
 * @value is false, additionally enables the CRASH_ON_*_FAULT bits so a
 * fault halts the hardware instead of being silently redirected.
 * No-op under SRIOV.
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		/* Not redirecting: make both retry and no-retry faults fatal. */
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
    414
    415static void mmhub_v1_0_init(struct amdgpu_device *adev)
    416{
    417	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    418
    419	hub->ctx0_ptb_addr_lo32 =
    420		SOC15_REG_OFFSET(MMHUB, 0,
    421				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
    422	hub->ctx0_ptb_addr_hi32 =
    423		SOC15_REG_OFFSET(MMHUB, 0,
    424				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
    425	hub->vm_inv_eng0_sem =
    426		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
    427	hub->vm_inv_eng0_req =
    428		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
    429	hub->vm_inv_eng0_ack =
    430		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
    431	hub->vm_context0_cntl =
    432		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
    433	hub->vm_l2_pro_fault_status =
    434		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
    435	hub->vm_l2_pro_fault_cntl =
    436		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
    437
    438	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
    439	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
    440		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
    441	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
    442	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
    443		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
    444}
    445
/*
 * mmhub_v1_0_update_medium_grain_clock_gating - toggle MC medium-grain CG.
 *
 * Enables/disables the ATC_L2_MISC_CG enable bit and clears/sets the
 * DAGB0 (and, on non-Raven parts, DAGB1) CNTL_MISC2 clock-gating disable
 * bits. Raven uses a different DAGB0 offset (mmDAGB0_CNTL_MISC2_RV) and
 * has no DAGB1 instance, hence the asic_type special-casing.
 * Registers are only written back when their value actually changed.
 */
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	/* def*/data* pairs track original vs. modified register values. */
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
    508
    509static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
    510						       bool enable)
    511{
    512	uint32_t def, data;
    513
    514	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
    515
    516	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
    517		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
    518	else
    519		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
    520
    521	if (def != data)
    522		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
    523}
    524
    525static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
    526			       enum amd_clockgating_state state)
    527{
    528	if (amdgpu_sriov_vf(adev))
    529		return 0;
    530
    531	switch (adev->asic_type) {
    532	case CHIP_VEGA10:
    533	case CHIP_VEGA12:
    534	case CHIP_VEGA20:
    535	case CHIP_RAVEN:
    536	case CHIP_RENOIR:
    537		mmhub_v1_0_update_medium_grain_clock_gating(adev,
    538				state == AMD_CG_STATE_GATE);
    539		mmhub_v1_0_update_medium_grain_light_sleep(adev,
    540				state == AMD_CG_STATE_GATE);
    541		break;
    542	default:
    543		break;
    544	}
    545
    546	return 0;
    547}
    548
/*
 * mmhub_v1_0_get_clockgating - report active MC clockgating features.
 *
 * ORs AMD_CG_SUPPORT_MC_MGCG / AMD_CG_SUPPORT_MC_LS into *flags based on
 * the current ATC_L2_MISC_CG and DAGB0_CNTL_MISC2 register contents.
 */
static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1;

	/*
	 * NOTE(review): under SRIOV the flags are cleared but execution
	 * falls through, so the reads below may still OR bits back in —
	 * confirm whether an early return was intended here.
	 */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG: CG enabled and no DAGB0 disable bit set. */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
    574
/*
 * RAS error-counter field table (Vega20 MMEA0/MMEA1 EDC count registers).
 * Each entry names a sub-block and gives the mask/shift pairs for its
 * SEC (correctable) and DED (uncorrectable) counter fields; entries with
 * only a SED count leave the DED mask/shift as 0, 0.
 */
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	}
};
    697
    698static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
    699   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
    700   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
    701   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
    702   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
    703};
    704
    705static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
    706	const struct soc15_reg_entry *reg,
    707	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
    708{
    709	uint32_t i;
    710	uint32_t sec_cnt, ded_cnt;
    711
    712	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
    713		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
    714			continue;
    715
    716		sec_cnt = (value &
    717				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
    718				mmhub_v1_0_ras_fields[i].sec_count_shift;
    719		if (sec_cnt) {
    720			dev_info(adev->dev,
    721				"MMHUB SubBlock %s, SEC %d\n",
    722				mmhub_v1_0_ras_fields[i].name,
    723				sec_cnt);
    724			*sec_count += sec_cnt;
    725		}
    726
    727		ded_cnt = (value &
    728				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
    729				mmhub_v1_0_ras_fields[i].ded_count_shift;
    730		if (ded_cnt) {
    731			dev_info(adev->dev,
    732				"MMHUB SubBlock %s, DED %d\n",
    733				mmhub_v1_0_ras_fields[i].name,
    734				ded_cnt);
    735			*ded_count += ded_cnt;
    736		}
    737	}
    738
    739	return 0;
    740}
    741
    742static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
    743					   void *ras_error_status)
    744{
    745	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
    746	uint32_t sec_count = 0, ded_count = 0;
    747	uint32_t i;
    748	uint32_t reg_value;
    749
    750	err_data->ue_count = 0;
    751	err_data->ce_count = 0;
    752
    753	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
    754		reg_value =
    755			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
    756		if (reg_value)
    757			mmhub_v1_0_get_ras_error_count(adev,
    758				&mmhub_v1_0_edc_cnt_regs[i],
    759				reg_value, &sec_count, &ded_count);
    760	}
    761
    762	err_data->ce_count += sec_count;
    763	err_data->ue_count += ded_count;
    764}
    765
    766static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
    767{
    768	uint32_t i;
    769
    770	/* read back edc counter registers to reset the counters to 0 */
    771	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
    772		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
    773			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
    774	}
    775}
    776
/* RAS hardware ops: query and reset hooks for the MMHUB EDC counters. */
struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
    781
/* RAS block descriptor wiring mmhub_v1_0_ras_hw_ops into the RAS core. */
struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_0_ras_hw_ops,
	},
};
    787
/* MMHUB v1.0 function table exported to the common GMC code. */
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.get_fb_location = mmhub_v1_0_get_fb_location,
	.init = mmhub_v1_0_init,
	.gart_enable = mmhub_v1_0_gart_enable,
	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
	.gart_disable = mmhub_v1_0_gart_disable,
	.set_clockgating = mmhub_v1_0_set_clockgating,
	.get_clockgating = mmhub_v1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
	.update_power_gating = mmhub_v1_0_update_power_gating,
};