cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_amdkfd_gfx_v10.c (22278B)


/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct amdgpu_device *adev)
{
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0);
}

static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}
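
/*
 * Worked example of the bit computed above (numbers are illustrative only;
 * the real counts come from the firmware-reported MEC configuration):
 * with num_queue_per_pipe == 8, pipe_id == 2 and queue_id == 3 select
 * bit 2 * 8 + 3 == 19, so get_queue_mask() returns 1ull << 19. This mask
 * is what kgd_hqd_load() later writes to CP_PQ_WPTR_POLL_CNTL1 to enable
 * WPTR polling for exactly that queue.
 */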

static void release_queue(struct amdgpu_device *adev)
{
	unlock_srbm(adev);
}

static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(adev);
}

static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);

	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

#if 0
	/* TODO: uncomment this code when the hardware support is ready. */
	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	pr_debug("ATHUB mapping update finished\n");
	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);
#endif

	/* Mapping vmid to pasid also for IH block */
	pr_debug("update mapping for IH block and mmhub");
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	return 0;
}
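
/*
 * Indexing note for the writes above (a reading of the "+ vmid" arithmetic,
 * not an authoritative register description): ATC_VMID0_PASID_MAPPING and
 * IH_VMID_0_LUT appear to be the first entries of per-VMID register arrays,
 * so adding vmid to their offsets selects the entry for that VMID. For
 * example, vmid == 5 and pasid == 0x42 would write 0x42 | VALID to the
 * sixth mapping register of both the ATHUB and the IH block.
 */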

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */

static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, 0, 0);

	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(adev);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
		 * below
		 */
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
	};

	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}
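
/*
 * Illustrative expansion of the computation above: the per-engine base is
 * the SOC15 offset of the engine's RLC0_RB_CNTL minus the
 * mmSDMA0_RLC0_RB_CNTL constant, and each RLC queue adds a fixed stride of
 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL) dwords. So for
 * engine_id == 1, queue_id == 2:
 *
 *	retval = sdma_engine_reg_base[1]
 *		 + 2 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
 *
 * Callers then address individual queue registers as
 * retval + mmSDMA0_RLC0_<REG>, which is why the SDMA0 register names are
 * reused for SDMA1 throughout the SDMA helpers below.
 */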

#if 0
static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
{
	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
			mmTCP_WATCH0_ADDR_H;

	pr_debug("kfd: reg watch base address: 0x%x\n", retval);

	return retval;
}
#endif

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t __user *wptr, uint32_t wptr_shift,
			uint32_t wptr_mask, struct mm_struct *mm)
{
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(adev, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);


	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		       lower_32_bits(guessed_wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		       upper_32_bits(guessed_wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
		       lower_32_bits((uint64_t)wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);

	release_queue(adev);

	return 0;
}
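
/*
 * Worked example of the WPTR reconstruction in kgd_hqd_load() (values are
 * illustrative only): with a computed queue_size of 0x1000,
 * cp_hqd_pq_rptr == 0x0ff0, cp_hqd_pq_wptr_lo == 0x2010 and
 * cp_hqd_pq_wptr_hi == 0, the in-ring part of the guess is 0x0ff0. Because
 * (0x2010 & 0xfff) == 0x010 is smaller than 0x0ff0, the saved WPTR must
 * have wrapped relative to the RPTR, so one queue size is added, then the
 * upper bits of the saved WPTR (0x2000) and the high dword are folded in,
 * giving guessed_wptr == 0x3ff0. The CP thus resumes with WPTR == RPTR and
 * the doorbell/WPTR-poll path picks up any new submissions.
 */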

static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(adev, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(adev);

	return r;
}
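
/*
 * Packet layout note for kgd_hiq_mqd_load(): the seven ring dwords reserved
 * above are the PM4 MAP_QUEUES header followed by six payload dwords in the
 * order written here: the queue-selection dword, the doorbell offset, the
 * MQD GPU address (lo/hi) and the WPTR poll address (lo/hi). Unlike
 * kgd_hqd_load(), which programs the HQD registers directly over MMIO, this
 * path asks the KIQ to map the HIQ on the driver's behalf.
 */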

static int kgd_hqd_dump(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(adev, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(adev);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
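
/*
 * Dump format note: each entry produced by DUMP_REG() is a
 * { byte offset, value } pair; the dword register offset is shifted left
 * by 2 so consumers see byte addresses. The kmalloc() above therefore
 * allocates HQD_N_REGS pairs of uint32_t, and *n_regs reports how many
 * pairs were actually filled in.
 */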

static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(adev, pipe_id, queue_id);
	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
		   high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(adev);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	if (amdgpu_in_reset(adev))
		return -EIO;

#if 0
	unsigned long flags;
	int retry;
#endif

	acquire_queue(adev, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
		type = SAVE_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

#if 0 /* Is this still needed? */
	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();
#endif

	WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(adev);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(adev);
	return 0;
}
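
/*
 * Preemption flow note for kgd_hqd_destroy(): the hqd_dequeue_request_type
 * value chosen above is written directly to CP_HQD_DEQUEUE_REQUEST, and the
 * loop that follows polls CP_HQD_ACTIVE until the ACTIVE bit drops or the
 * caller-supplied timeout (utimeout, apparently in milliseconds, hence the
 * "* HZ / 1000" conversion to jiffies) expires with -ETIME.
 */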

static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}
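
/*
 * State save note: kgd_hqd_sdma_destroy() writes the final RB_RPTR values
 * back into the MQD so that a later kgd_hqd_sdma_load() can restart the
 * ring where it left off; kgd_hqd_sdma_load() mirrors this by falling back
 * to the saved read pointer when the user-space write pointer cannot be
 * read (the read_user_wptr() branch above).
 */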

static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

static int kgd_wave_control_execute(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
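
/*
 * GRBM index note: kgd_wave_control_execute() temporarily narrows
 * GRBM_GFX_INDEX to the caller-specified selection so that SQ_CMD only
 * reaches the intended waves, then restores the register to broadcast mode
 * (SE/SA/instance broadcast writes) before dropping the mutex, so that
 * later register writes are not accidentally confined to one instance.
 */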

static void set_vm_context_page_table_base(struct amdgpu_device *adev,
		uint32_t vmid, uint64_t page_table_base)
{
	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* SDMA is on gfxhub as well for Navi1* series */
	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}

static void program_trap_handler_settings(struct amdgpu_device *adev,
		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	/*
	 * Program TBA registers
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
			lower_32_bits(tba_addr >> 8));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
			upper_32_bits(tba_addr >> 8) |
			(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));

	/*
	 * Program TMA registers
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
			lower_32_bits(tma_addr >> 8));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
			upper_32_bits(tma_addr >> 8));

	unlock_srbm(adev);
}
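
/*
 * Address encoding note: the trap handler base (TBA) and trap memory (TMA)
 * addresses are written shifted right by 8, i.e. in units of 256 bytes, and
 * the TRAP_EN bit is set in the TBA_HI write to enable the trap handler.
 * As with the other per-VMID setup in this file, the target VMID is
 * selected through lock_srbm()/unlock_srbm().
 */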

const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hiq_mqd_load = kgd_hiq_mqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.wave_control_execute = kgd_wave_control_execute,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.program_trap_handler_settings = program_trap_handler_settings,
};