cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

si_ih.c (7766B)


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

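/*
 * Enable the interrupt handler (IH) and its ring buffer by setting
 * ENABLE_INTR in IH_CNTL and IH_RB_ENABLE in IH_RB_CNTL; the disable
 * path below clears both bits and resets the ring read/write pointers.
 */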
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

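/*
 * Program the IH ring buffer: dummy read address, ring base and size,
 * write-pointer writeback address and MSI rearm, then enable PCI bus
 * mastering and turn interrupts back on.
 */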
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

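/*
 * Read the current write pointer from the writeback slot.  On a ring
 * buffer overflow, warn, restart processing at the first entry after
 * the reported write pointer and clear the overflow bit in IH_RB_CNTL.
 */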
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

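/*
 * Each interrupt vector on SI is four dwords (16 bytes): extract the
 * source id, source data, ring id and VM id, then advance the read
 * pointer past the entry.
 */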
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

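/*
 * If SRBM_STATUS still reports the IH block as busy, pulse its bit in
 * SRBM_SOFT_RESET to force the block back to a known state.
 */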
static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};