cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

si_dma.c (23480B)


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
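	/* The INDIRECT_BUFFER packet below is 3 DW (header, IB address low
	 * bits, size | IB address high bits), so padding until
	 * wptr % 8 == 5 makes it end exactly on an 8 DW boundary (5 + 3 = 8).
	 */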
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: fence sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence
 * sequence number, followed by a DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{

	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* dma0 */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
	}
}

static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
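		/* ring->ring_size is in bytes; order_base_2() of the size in
		 * dwords goes into the ring-buffer size field of DMA_RB_CNTL
		 * (shifted left by one below).
		 */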
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = ring->rptr_gpu_addr;

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

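	/* Poll the writeback slot until the engine stores the test pattern;
	 * adev->usec_timeout iterations of udelay(1) bound the wait to
	 * roughly usec_timeout microseconds.
	 */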
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

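	/* Build a single 4-dword DMA WRITE packet: header with a one-dword
	 * count, the 40-bit write-back address (low 32 bits plus 8 high
	 * bits), and the test pattern as payload.
	 */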
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;
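	/* each page table entry is 8 bytes; the COPY packet's count field
	 * is expressed in bytes here
	 */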

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into the page table entries
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;
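	/* each 64-bit page entry occupies two dwords of WRITE packet payload */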

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: physical address to program into the page entries
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;
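		/* each PTE_PDE packet is capped at 0xFFFFE dwords, i.e.
		 * 0x7FFFF entries at two dwords each; larger updates take
		 * another pass through the loop
		 */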

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOP packets to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

static int si_dma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

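	/* When medium-grain clock gating is supported and gating is
	 * requested, clear MEM_POWER_OVERRIDE and program DMA_CLK_CTRL with
	 * 0x100; otherwise set the override and write 0xff000000, which
	 * presumably forces the clocks on. The exact DMA_CLK_CTRL bit layout
	 * is not decoded here.
	 */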
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE,  0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

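/* Dword accounting for emit_frame_size below: each register write goes
 * through si_dma_ring_emit_wreg and takes 3 DW (presumably the source of
 * the 3 + 3 HDP flush/invalidate budget and of the
 * SI_FLUSH_GPU_TLB_NUM_WREG * 3 term), the pipeline sync and the VM-flush
 * wait each emit a 6 DW POLL_REG_MEM sequence, and a fence is at most 9 DW
 * (4 + 4 for the optional high half + 1 trap). emit_ib_size is the worst
 * case of 7 NOP pads plus the 3 DW INDIRECT_BUFFER packet.
 */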
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}


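/* copy_num_dw and fill_num_dw match the dword counts emitted by
 * si_dma_emit_copy_buffer (5 DW) and si_dma_emit_fill_buffer (4 DW);
 * 0xffff8 presumably reflects the packets' byte-count field limit,
 * rounded down to keep 8-byte alignment.
 */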
static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};