cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_sdma.c (3944B)


/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

/*
 * GPU SDMA IP block helper functions.
 */

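/* Look up the SDMA instance that owns @ring (either its kernel ring or its page ring). */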
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

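/* Like amdgpu_sdma_get_instance_from_ring(), but store the instance index in @index. */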
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
			ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

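/*
 * Return the MC address of the per-VMID SDMA context save area used for
 * mid-command-buffer preemption, or 0 when preemption is not applicable
 * (SRIOV, VMID 0, or MCBP disabled).
 */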
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
		return 0;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  sdma[ring->idx].sdma_meta_data);
		csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		r = amdgpu_sdma_get_index_from_ring(ring, &index);

		if (r || index > 31)
			csa_mc_addr = 0;
		else
			csa_mc_addr = amdgpu_csa_vaddr(adev) +
				AMDGPU_CSA_SDMA_OFFSET +
				index * AMDGPU_CSA_SDMA_SIZE;
	}

	return csa_mc_addr;
}

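/*
 * Common RAS late init for the SDMA block: register the block with the RAS
 * framework and, if RAS is supported, enable the ECC interrupt on every
 * SDMA instance.
 */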
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

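/*
 * RAS error callback: flag the SRAM ECC error to KFD and, on bare metal,
 * trigger a GPU reset to recover.
 */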
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (amdgpu_sriov_vf(adev))
		return AMDGPU_RAS_SUCCESS;

	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

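/* ECC interrupt handler: forward the IV entry to the RAS interrupt dispatcher. */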
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}