cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_fence.c (22172B)


      1/*
      2 * Copyright 2009 Jerome Glisse.
      3 * All Rights Reserved.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the
      7 * "Software"), to deal in the Software without restriction, including
      8 * without limitation the rights to use, copy, modify, merge, publish,
      9 * distribute, sub license, and/or sell copies of the Software, and to
     10 * permit persons to whom the Software is furnished to do so, subject to
     11 * the following conditions:
     12 *
     13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
     20 *
     21 * The above copyright notice and this permission notice (including the
     22 * next paragraph) shall be included in all copies or substantial portions
     23 * of the Software.
     24 *
     25 */
     26/*
     27 * Authors:
     28 *    Jerome Glisse <glisse@freedesktop.org>
     29 *    Dave Airlie
     30 */
     31#include <linux/seq_file.h>
     32#include <linux/atomic.h>
     33#include <linux/wait.h>
     34#include <linux/kref.h>
     35#include <linux/slab.h>
     36#include <linux/firmware.h>
     37#include <linux/pm_runtime.h>
     38
     39#include <drm/drm_drv.h>
     40#include "amdgpu.h"
     41#include "amdgpu_trace.h"
     42
     43/*
     44 * Fences
     45 * Fences mark an event in the GPU's pipeline and are used
     46 * for GPU/CPU synchronization.  When the fence is written,
     47 * it is expected that all buffers associated with that fence
     48 * are no longer in use by the associated ring on the GPU and
     49 * that the relevant GPU caches have been flushed.
     50 */
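As an illustrative sketch only (example_emit_and_wait and its surrounding setup are assumptions, not part of this file), the typical lifecycle built from the helpers below might look like this; in the driver the emit happens inside a command submission that has already reserved ring space:

/* Sketch: emit a standalone fence on @ring and block until the GPU reaches it. */
static int example_emit_and_wait(struct amdgpu_ring *ring)
{
	struct dma_fence *fence;
	int r;

	/* NULL job: a separate hw fence is allocated from amdgpu_fence_slab */
	r = amdgpu_fence_emit(ring, &fence, NULL, 0);
	if (r)
		return r;

	/* the fence interrupt (or the fallback timer) ends up in
	 * amdgpu_fence_process(), which signals the fence once the GPU has
	 * written the matching sequence number */
	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}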
     51
     52struct amdgpu_fence {
     53	struct dma_fence base;
     54
     55	/* RB, DMA, etc. */
     56	struct amdgpu_ring		*ring;
     57};
     58
     59static struct kmem_cache *amdgpu_fence_slab;
     60
     61int amdgpu_fence_slab_init(void)
     62{
     63	amdgpu_fence_slab = kmem_cache_create(
     64		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
     65		SLAB_HWCACHE_ALIGN, NULL);
     66	if (!amdgpu_fence_slab)
     67		return -ENOMEM;
     68	return 0;
     69}
     70
     71void amdgpu_fence_slab_fini(void)
     72{
     73	rcu_barrier();
     74	kmem_cache_destroy(amdgpu_fence_slab);
     75}
     76/*
     77 * Cast helper
     78 */
     79static const struct dma_fence_ops amdgpu_fence_ops;
     80static const struct dma_fence_ops amdgpu_job_fence_ops;
     81static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
     82{
     83	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
     84
     85	if (__f->base.ops == &amdgpu_fence_ops ||
     86	    __f->base.ops == &amdgpu_job_fence_ops)
     87		return __f;
     88
     89	return NULL;
     90}
     91
     92/**
     93 * amdgpu_fence_write - write a fence value
     94 *
     95 * @ring: ring the fence is associated with
     96 * @seq: sequence number to write
     97 *
     98 * Writes a fence value to memory (all asics).
     99 */
    100static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
    101{
    102	struct amdgpu_fence_driver *drv = &ring->fence_drv;
    103
    104	if (drv->cpu_addr)
    105		*drv->cpu_addr = cpu_to_le32(seq);
    106}
    107
    108/**
    109 * amdgpu_fence_read - read a fence value
    110 *
    111 * @ring: ring the fence is associated with
    112 *
    113 * Reads a fence value from memory (all asics).
    114 * Returns the value of the fence read from memory.
    115 */
    116static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
    117{
    118	struct amdgpu_fence_driver *drv = &ring->fence_drv;
    119	u32 seq = 0;
    120
    121	if (drv->cpu_addr)
    122		seq = le32_to_cpu(*drv->cpu_addr);
    123	else
    124		seq = atomic_read(&drv->last_seq);
    125
    126	return seq;
    127}
    128
    129/**
    130 * amdgpu_fence_emit - emit a fence on the requested ring
    131 *
    132 * @ring: ring the fence is associated with
    133 * @f: resulting fence object
    134 * @job: job the fence is embedded in
    135 * @flags: flags to pass into the subordinate .emit_fence() call
    136 *
    137 * Emits a fence command on the requested ring (all asics).
    138 * Returns 0 on success, -ENOMEM on failure.
    139 */
    140int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
    141		      unsigned flags)
    142{
    143	struct amdgpu_device *adev = ring->adev;
    144	struct dma_fence *fence;
    145	struct amdgpu_fence *am_fence;
    146	struct dma_fence __rcu **ptr;
    147	uint32_t seq;
    148	int r;
    149
    150	if (job == NULL) {
    151		/* create a separate hw fence */
    152		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
    153		if (am_fence == NULL)
    154			return -ENOMEM;
    155		fence = &am_fence->base;
    156		am_fence->ring = ring;
    157	} else {
    158		/* use the job-embedded fence */
    159		fence = &job->hw_fence;
    160	}
    161
    162	seq = ++ring->fence_drv.sync_seq;
    163	if (job && job->job_run_counter) {
    164		/* reinit seq for resubmitted jobs */
    165		fence->seqno = seq;
    166	} else {
    167		if (job)
    168			dma_fence_init(fence, &amdgpu_job_fence_ops,
    169				       &ring->fence_drv.lock,
    170				       adev->fence_context + ring->idx, seq);
    171		else
    172			dma_fence_init(fence, &amdgpu_fence_ops,
    173				       &ring->fence_drv.lock,
    174				       adev->fence_context + ring->idx, seq);
    175	}
    176
    177	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
    178			       seq, flags | AMDGPU_FENCE_FLAG_INT);
    179	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
    180	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
    181	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
    182		struct dma_fence *old;
    183
    184		rcu_read_lock();
    185		old = dma_fence_get_rcu_safe(ptr);
    186		rcu_read_unlock();
    187
    188		if (old) {
    189			r = dma_fence_wait(old, false);
    190			dma_fence_put(old);
    191			if (r)
    192				return r;
    193		}
    194	}
    195
    196	/* This function can't be called concurrently anyway, otherwise
    197	 * emitting the fence would mess up the hardware ring buffer.
    198	 */
    199	rcu_assign_pointer(*ptr, dma_fence_get(fence));
    200
    201	*f = fence;
    202
    203	return 0;
    204}
    205
    206/**
    207 * amdgpu_fence_emit_polling - emit a fence on the requested ring
    208 *
    209 * @ring: ring the fence is associated with
    210 * @s: resulting sequence number
    211 * @timeout: the timeout for waiting in usecs
    212 *
    213 * Emits a fence command on the requested ring (all asics).
    214 * Used for fence polling.
    215 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
    216 */
    217int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
    218			      uint32_t timeout)
    219{
    220	uint32_t seq;
    221	signed long r;
    222
    223	if (!s)
    224		return -EINVAL;
    225
    226	seq = ++ring->fence_drv.sync_seq;
    227	r = amdgpu_fence_wait_polling(ring,
    228				      seq - ring->fence_drv.num_fences_mask,
    229				      timeout);
    230	if (r < 1)
    231		return -ETIMEDOUT;
    232
    233	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
    234			       seq, 0);
    235
    236	*s = seq;
    237
    238	return 0;
    239}
    240
    241/**
    242 * amdgpu_fence_schedule_fallback - schedule fallback check
    243 *
    244 * @ring: pointer to struct amdgpu_ring
    245 *
    246 * Start a timer as fallback to our interrupts.
    247 */
    248static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
    249{
    250	mod_timer(&ring->fence_drv.fallback_timer,
    251		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
    252}
    253
    254/**
    255 * amdgpu_fence_process - check for fence activity
    256 *
    257 * @ring: pointer to struct amdgpu_ring
    258 *
    259 * Checks the current fence value and calculates the last
    260 * signalled fence value. Wakes the fence queue if the
    261 * sequence number has increased.
    262 *
    263 * Returns true if fence was processed
    264 */
    265bool amdgpu_fence_process(struct amdgpu_ring *ring)
    266{
    267	struct amdgpu_fence_driver *drv = &ring->fence_drv;
    268	struct amdgpu_device *adev = ring->adev;
    269	uint32_t seq, last_seq;
    270
    271	do {
    272		last_seq = atomic_read(&ring->fence_drv.last_seq);
    273		seq = amdgpu_fence_read(ring);
    274
    275	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
    276
    277	if (del_timer(&ring->fence_drv.fallback_timer) &&
    278	    seq != ring->fence_drv.sync_seq)
    279		amdgpu_fence_schedule_fallback(ring);
    280
    281	if (unlikely(seq == last_seq))
    282		return false;
    283
    284	last_seq &= drv->num_fences_mask;
    285	seq &= drv->num_fences_mask;
    286
    287	do {
    288		struct dma_fence *fence, **ptr;
    289
    290		++last_seq;
    291		last_seq &= drv->num_fences_mask;
    292		ptr = &drv->fences[last_seq];
    293
    294		/* There is always exactly one thread signaling this fence slot */
    295		fence = rcu_dereference_protected(*ptr, 1);
    296		RCU_INIT_POINTER(*ptr, NULL);
    297
    298		if (!fence)
    299			continue;
    300
    301		dma_fence_signal(fence);
    302		dma_fence_put(fence);
    303		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
    304		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
    305	} while (last_seq != seq);
    306
    307	return true;
    308}
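A hedged illustration of why the cmpxchg loop above is needed; the sequence numbers are invented for the example:

/*
 * Illustration (invented values): the fence interrupt and the fallback timer
 * may call amdgpu_fence_process() concurrently.
 *
 *   CPU0 (irq)   reads last_seq = 5, reads fence value 8
 *   CPU1 (timer) reads last_seq = 5, reads fence value 9,
 *                cmpxchg(5 -> 9) succeeds and signals slots 6..9
 *   CPU0         cmpxchg(5 -> 8) fails since last_seq is now 9, re-reads
 *                last_seq = 9 and fence value 9, cmpxchg(9 -> 9) succeeds,
 *                then sees seq == last_seq and returns false
 *
 * last_seq therefore never moves backwards and each fence slot is signaled
 * by exactly one thread.
 */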
    309
    310/**
    311 * amdgpu_fence_fallback - fallback for hardware interrupts
    312 *
    313 * @t: timer context used to obtain the pointer to ring structure
    314 *
    315 * Checks for fence activity.
    316 */
    317static void amdgpu_fence_fallback(struct timer_list *t)
    318{
    319	struct amdgpu_ring *ring = from_timer(ring, t,
    320					      fence_drv.fallback_timer);
    321
    322	if (amdgpu_fence_process(ring))
    323		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
    324}
    325
    326/**
    327 * amdgpu_fence_wait_empty - wait for all fences to signal
    328 *
    329 * @ring: ring the fence is associated with
    330 *
    331 * Wait for all fences on the requested ring to signal (all asics).
    332 * Returns 0 if the fences have passed, error for all other cases.
    333 */
    334int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
    335{
    336	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
    337	struct dma_fence *fence, **ptr;
    338	int r;
    339
    340	if (!seq)
    341		return 0;
    342
    343	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
    344	rcu_read_lock();
    345	fence = rcu_dereference(*ptr);
    346	if (!fence || !dma_fence_get_rcu(fence)) {
    347		rcu_read_unlock();
    348		return 0;
    349	}
    350	rcu_read_unlock();
    351
    352	r = dma_fence_wait(fence, false);
    353	dma_fence_put(fence);
    354	return r;
    355}
    356
    357/**
    358 * amdgpu_fence_wait_polling - busy wait for given sequence number
    359 *
    360 * @ring: ring the fence is associated with
    361 * @wait_seq: sequence number to wait for
    362 * @timeout: the timeout for waiting in usecs
    363 *
    364 * Busy-wait for the given sequence number to signal (all asics).
    365 * Returns the remaining timeout if the sequence number signaled in time, 0 on timeout.
    366 */
    367signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
    368				      uint32_t wait_seq,
    369				      signed long timeout)
    370{
    371	uint32_t seq;
    372
    373	do {
    374		seq = amdgpu_fence_read(ring);
    375		udelay(5);
    376		timeout -= 5;
    377	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
    378
    379	return timeout > 0 ? timeout : 0;
    380}
    381/**
    382 * amdgpu_fence_count_emitted - get the count of emitted fences
    383 *
    384 * @ring: ring the fence is associated with
    385 *
    386 * Get the number of fences emitted on the requested ring (all asics).
    387 * Returns the number of emitted fences on the ring.  Used by the
    388 * dynpm code to track ring activity.
    389 */
    390unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
    391{
    392	uint64_t emitted;
    393
    394	/* We are not protected by ring lock when reading the last sequence
    395	 * but it's ok to report slightly wrong fence count here.
    396	 */
    397	amdgpu_fence_process(ring);
    398	emitted = 0x100000000ull;
    399	emitted -= atomic_read(&ring->fence_drv.last_seq);
    400	emitted += READ_ONCE(ring->fence_drv.sync_seq);
    401	return lower_32_bits(emitted);
    402}
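A worked example of the wraparound arithmetic above, with values invented for illustration:

/*
 * Worked example (invented values): sequence numbers wrap at 32 bits, so the
 * count is formed in 64 bits first.
 *
 *   last_seq = 0xfffffff0, sync_seq = 0x00000005  (sync_seq has wrapped)
 *   emitted  = 0x100000000ull - 0xfffffff0 + 0x00000005 = 0x15
 *
 * lower_32_bits(0x15) = 21 fences emitted but not yet signaled.
 */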
    403
    404/**
    405 * amdgpu_fence_driver_start_ring - make the fence driver
    406 * ready for use on the requested ring.
    407 *
    408 * @ring: ring to start the fence driver on
    409 * @irq_src: interrupt source to use for this ring
    410 * @irq_type: interrupt type to use for this ring
    411 *
    412 * Make the fence driver ready for processing (all asics).
    413 * Not all asics have all rings, so each asic will only
    414 * start the fence driver on the rings it has.
    415 * Returns 0 for success, errors for failure.
    416 */
    417int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
    418				   struct amdgpu_irq_src *irq_src,
    419				   unsigned irq_type)
    420{
    421	struct amdgpu_device *adev = ring->adev;
    422	uint64_t index;
    423
    424	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
    425		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
    426		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
    427	} else {
    428		/* put fence directly behind firmware */
    429		index = ALIGN(adev->uvd.fw->size, 8);
    430		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
    431		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
    432	}
    433	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
    434
    435	ring->fence_drv.irq_src = irq_src;
    436	ring->fence_drv.irq_type = irq_type;
    437	ring->fence_drv.initialized = true;
    438
    439	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
    440		      ring->name, ring->fence_drv.gpu_addr);
    441	return 0;
    442}
    443
    444/**
    445 * amdgpu_fence_driver_init_ring - init the fence driver
    446 * for the requested ring.
    447 *
    448 * @ring: ring to init the fence driver on
    449 *
    450 * Init the fence driver for the requested ring (all asics).
    451 * Helper function for amdgpu_fence_driver_init().
    452 */
    453int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
    454{
    455	struct amdgpu_device *adev = ring->adev;
    456
    457	if (!adev)
    458		return -EINVAL;
    459
    460	if (!is_power_of_2(ring->num_hw_submission))
    461		return -EINVAL;
    462
    463	ring->fence_drv.cpu_addr = NULL;
    464	ring->fence_drv.gpu_addr = 0;
    465	ring->fence_drv.sync_seq = 0;
    466	atomic_set(&ring->fence_drv.last_seq, 0);
    467	ring->fence_drv.initialized = false;
    468
    469	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
    470
    471	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
    472	spin_lock_init(&ring->fence_drv.lock);
    473	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
    474					 GFP_KERNEL);
    475
    476	if (!ring->fence_drv.fences)
    477		return -ENOMEM;
    478
    479	return 0;
    480}
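A hedged illustration of the sizing above; num_hw_submission = 128 is chosen only for the example:

/*
 * Illustration (num_hw_submission = 128): fences[] then holds 2 * 128 = 256
 * slots and num_fences_mask = 0xff.  Both factors are powers of two, so a
 * sequence number maps to a slot with a single AND, e.g. 0x101 & 0xff = 0x01.
 * amdgpu_fence_emit() waits for the previous occupant of a slot before
 * reusing it, so at most 256 fences can be outstanding per ring.
 */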
    481
    482/**
    483 * amdgpu_fence_driver_sw_init - init the fence driver
    484 * for all possible rings.
    485 *
    486 * @adev: amdgpu device pointer
    487 *
    488 * Init the fence driver for all possible rings (all asics).
    489 * Not all asics have all rings, so each asic will only
    490 * start the fence driver on the rings it has using
    491 * amdgpu_fence_driver_start_ring().
    492 * Returns 0 for success.
    493 */
    494int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
    495{
    496	return 0;
    497}
    498
    499/**
    500 * amdgpu_fence_driver_hw_fini - tear down the fence driver
    501 * for all possible rings.
    502 *
    503 * @adev: amdgpu device pointer
    504 *
    505 * Tear down the fence driver for all possible rings (all asics).
    506 */
    507void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
    508{
    509	int i, r;
    510
    511	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
    512		struct amdgpu_ring *ring = adev->rings[i];
    513
    514		if (!ring || !ring->fence_drv.initialized)
    515			continue;
    516
    517		/* You can't wait for HW to signal if it's gone */
    518		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
    519			r = amdgpu_fence_wait_empty(ring);
    520		else
    521			r = -ENODEV;
    522		/* no need to trigger GPU reset as we are unloading */
    523		if (r)
    524			amdgpu_fence_driver_force_completion(ring);
    525
    526		if (ring->fence_drv.irq_src)
    527			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
    528				       ring->fence_drv.irq_type);
    529
    530		del_timer_sync(&ring->fence_drv.fallback_timer);
    531	}
    532}
    533
    534void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
    535{
    536	unsigned int i, j;
    537
    538	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
    539		struct amdgpu_ring *ring = adev->rings[i];
    540
    541		if (!ring || !ring->fence_drv.initialized)
    542			continue;
    543
    544		if (!ring->no_scheduler)
    545			drm_sched_fini(&ring->sched);
    546
    547		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
    548			dma_fence_put(ring->fence_drv.fences[j]);
    549		kfree(ring->fence_drv.fences);
    550		ring->fence_drv.fences = NULL;
    551		ring->fence_drv.initialized = false;
    552	}
    553}
    554
    555/**
    556 * amdgpu_fence_driver_hw_init - enable the fence driver
    557 * for all possible rings.
    558 *
    559 * @adev: amdgpu device pointer
    560 *
    561 * Enable the fence driver for all possible rings (all asics).
    562 * Not all asics have all rings, so each asic will only
    563 * start the fence driver on the rings it has using
    564 * amdgpu_fence_driver_start_ring().
    565 * Returns 0 for success.
    566 */
    567void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
    568{
    569	int i;
    570
    571	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
    572		struct amdgpu_ring *ring = adev->rings[i];
    573		if (!ring || !ring->fence_drv.initialized)
    574			continue;
    575
    576		/* enable the interrupt */
    577		if (ring->fence_drv.irq_src)
    578			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
    579				       ring->fence_drv.irq_type);
    580	}
    581}
    582
    583/**
    584 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
    585 *
    586 * @ring: ring whose job-embedded fences should be cleared
    587 *
    588 */
    589void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
    590{
    591	int i;
    592	struct dma_fence *old, **ptr;
    593
    594	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
    595		ptr = &ring->fence_drv.fences[i];
    596		old = rcu_dereference_protected(*ptr, 1);
    597		if (old && old->ops == &amdgpu_job_fence_ops)
    598			RCU_INIT_POINTER(*ptr, NULL);
    599	}
    600}
    601
    602/**
    603 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
    604 *
    605 * @ring: ring whose latest fence should be signaled
    606 *
    607 */
    608void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
    609{
    610	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
    611	amdgpu_fence_process(ring);
    612}
    613
    614/*
    615 * Common fence implementation
    616 */
    617
    618static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
    619{
    620	return "amdgpu";
    621}
    622
    623static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
    624{
    625	return (const char *)to_amdgpu_fence(f)->ring->name;
    626}
    627
    628static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
    629{
    630	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
    631
    632	return (const char *)to_amdgpu_ring(job->base.sched)->name;
    633}
    634
    635/**
    636 * amdgpu_fence_enable_signaling - enable signalling on fence
    637 * @f: fence
    638 *
    639 * This function is called with fence_queue lock held, and adds a callback
    640 * to fence_queue that checks if this fence is signaled, and if so it
    641 * signals the fence and removes itself.
    642 */
    643static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
    644{
    645	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
    646		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
    647
    648	return true;
    649}
    650
    651/**
    652 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
    653 * @f: fence
    654 *
    655 * This is similar to amdgpu_fence_enable_signaling above, but it
    656 * only handles the job-embedded fence.
    657 */
    658static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
    659{
    660	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
    661
    662	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
    663		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
    664
    665	return true;
    666}
    667
    668/**
    669 * amdgpu_fence_free - free up the fence memory
    670 *
    671 * @rcu: RCU callback head
    672 *
    673 * Free up the fence memory after the RCU grace period.
    674 */
    675static void amdgpu_fence_free(struct rcu_head *rcu)
    676{
    677	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
    678
    679	/* free from fence_slab if it's a separate fence */
    680	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
    681}
    682
    683/**
    684 * amdgpu_job_fence_free - free up the job with embedded fence
    685 *
    686 * @rcu: RCU callback head
    687 *
    688 * Free up the job with embedded fence after the RCU grace period.
    689 */
    690static void amdgpu_job_fence_free(struct rcu_head *rcu)
    691{
    692	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
    693
    694	/* free the job this fence is embedded in */
    695	kfree(container_of(f, struct amdgpu_job, hw_fence));
    696}
    697
    698/**
    699 * amdgpu_fence_release - callback that fence can be freed
    700 *
    701 * @f: fence
    702 *
    703 * This function is called when the reference count becomes zero.
    704 * It just RCU schedules freeing up the fence.
    705 */
    706static void amdgpu_fence_release(struct dma_fence *f)
    707{
    708	call_rcu(&f->rcu, amdgpu_fence_free);
    709}
    710
    711/**
    712 * amdgpu_job_fence_release - callback that job embedded fence can be freed
    713 *
    714 * @f: fence
    715 *
    716 * This is similar to amdgpu_fence_release above, but it
    717 * only handles the job-embedded fence.
    718 */
    719static void amdgpu_job_fence_release(struct dma_fence *f)
    720{
    721	call_rcu(&f->rcu, amdgpu_job_fence_free);
    722}
    723
    724static const struct dma_fence_ops amdgpu_fence_ops = {
    725	.get_driver_name = amdgpu_fence_get_driver_name,
    726	.get_timeline_name = amdgpu_fence_get_timeline_name,
    727	.enable_signaling = amdgpu_fence_enable_signaling,
    728	.release = amdgpu_fence_release,
    729};
    730
    731static const struct dma_fence_ops amdgpu_job_fence_ops = {
    732	.get_driver_name = amdgpu_fence_get_driver_name,
    733	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
    734	.enable_signaling = amdgpu_job_fence_enable_signaling,
    735	.release = amdgpu_job_fence_release,
    736};
    737
    738/*
    739 * Fence debugfs
    740 */
    741#if defined(CONFIG_DEBUG_FS)
    742static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
    743{
    744	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
    745	int i;
    746
    747	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
    748		struct amdgpu_ring *ring = adev->rings[i];
    749		if (!ring || !ring->fence_drv.initialized)
    750			continue;
    751
    752		amdgpu_fence_process(ring);
    753
    754		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
    755		seq_printf(m, "Last signaled fence          0x%08x\n",
    756			   atomic_read(&ring->fence_drv.last_seq));
    757		seq_printf(m, "Last emitted                 0x%08x\n",
    758			   ring->fence_drv.sync_seq);
    759
    760		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
    761		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
    762			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
    763				   le32_to_cpu(*ring->trail_fence_cpu_addr));
    764			seq_printf(m, "Last emitted                 0x%08x\n",
    765				   ring->trail_seq);
    766		}
    767
    768		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
    769			continue;
    770
    771		/* set in CP_VMID_PREEMPT and preemption occurred */
    772		seq_printf(m, "Last preempted               0x%08x\n",
    773			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
    774		/* set in CP_VMID_RESET and reset occurred */
    775		seq_printf(m, "Last reset                   0x%08x\n",
    776			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
    777		/* Both preemption and reset occurred */
    778		seq_printf(m, "Last both                    0x%08x\n",
    779			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
    780	}
    781	return 0;
    782}
    783
    784/*
    785 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
    786 *
    787 * Manually trigger a gpu reset at the next fence wait.
    788 */
    789static int gpu_recover_get(void *data, u64 *val)
    790{
    791	struct amdgpu_device *adev = (struct amdgpu_device *)data;
    792	struct drm_device *dev = adev_to_drm(adev);
    793	int r;
    794
    795	r = pm_runtime_get_sync(dev->dev);
    796	if (r < 0) {
    797		pm_runtime_put_autosuspend(dev->dev);
    798		return 0;
    799	}
    800
    801	*val = amdgpu_device_gpu_recover(adev, NULL);
    802
    803	pm_runtime_mark_last_busy(dev->dev);
    804	pm_runtime_put_autosuspend(dev->dev);
    805
    806	return 0;
    807}
    808
    809DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
    810DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
    811			 "%lld\n");
    812
    813#endif
    814
    815void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
    816{
    817#if defined(CONFIG_DEBUG_FS)
    818	struct drm_minor *minor = adev_to_drm(adev)->primary;
    819	struct dentry *root = minor->debugfs_root;
    820
    821	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
    822			    &amdgpu_debugfs_fence_info_fops);
    823
    824	if (!amdgpu_sriov_vf(adev))
    825		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
    826				    &amdgpu_debugfs_gpu_recover_fops);
    827#endif
    828}
    829