cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hdp_v5_0.c (8437B)


/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v5_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

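/*
 * HDP (Host Data Path) is the block through which the host CPU reaches VRAM
 * over the PCIe BAR. It contains small write and read caches, so pending CPU
 * writes have to be flushed and stale read data invalidated to keep the CPU
 * and GPU views of memory coherent. The helpers below do this either with a
 * direct MMIO register write or by emitting the register write on a ring.
 */
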
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode,
	 * force the IPH & RC clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching;
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above. The actions below are for "enabled" only */
	if (enable) {
		/* Only one clock gating mode (LS/DS/SD) can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_SD_EN, 1);
			/* RC should not use shutdown mode; fall back to DS or LS if allowed */
			if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_DS_EN, 1);
			else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_LS_EN, 1);
		}

		/* It is confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN
		 * have to be set for SRAM LS/DS/SD */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* Disable the IPH & RC clock overrides after the clock/power mode change */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
					      bool enable)
{
	hdp_v5_0_update_mem_power_gating(adev, enable);
	hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
}

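/*
 * Report which HDP clock/power-gating features are currently active by
 * OR-ing the corresponding AMD_CG_SUPPORT_* bits into *flags.
 */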
static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	uint32_t tmp;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
}

const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
	.flush_hdp = hdp_v5_0_flush_hdp,
	.invalidate_hdp = hdp_v5_0_invalidate_hdp,
	.update_clock_gating = hdp_v5_0_update_clock_gating,
	.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
	.init_registers = hdp_v5_0_init_registers,
};
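
Not part of this file: a minimal sketch of how this callback table is typically consumed elsewhere in amdgpu, assuming the usual adev->hdp.funcs wiring (the exact call sites and guard conditions below are illustrative, not taken from this tree, and vary by ASIC and kernel version).

	/* Sketch (assumption): the SoC common init code for HDP 5.0 parts
	 * attaches the table to the device ... */
	adev->hdp.funcs = &hdp_v5_0_funcs;

	/* ... and other IP blocks then flush or invalidate the HDP cache
	 * through it, either over MMIO (ring == NULL) or by emitting the
	 * register write on a ring. */
	if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
		adev->hdp.funcs->flush_hdp(adev, NULL);	/* direct MMIO path */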