cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dma-fence.c (30246B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Fence mechanism for dma-buf and to allow for asynchronous dma access
      4 *
      5 * Copyright (C) 2012 Canonical Ltd
      6 * Copyright (C) 2012 Texas Instruments
      7 *
      8 * Authors:
      9 * Rob Clark <robdclark@gmail.com>
     10 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
     11 */
     12
     13#include <linux/slab.h>
     14#include <linux/export.h>
     15#include <linux/atomic.h>
     16#include <linux/dma-fence.h>
     17#include <linux/sched/signal.h>
     18#include <linux/seq_file.h>
     19
     20#define CREATE_TRACE_POINTS
     21#include <trace/events/dma_fence.h>
     22
     23EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
     24EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
     25EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
     26
     27static DEFINE_SPINLOCK(dma_fence_stub_lock);
     28static struct dma_fence dma_fence_stub;
     29
     30/*
     31 * fence context counter: each execution context should have its own
     32 * fence context; this allows checking if fences belong to the same
     33 * context or not. One device can have multiple separate contexts,
     34 * and they're used if some engine can run independently of another.
     35 */
     36static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
     37
     38/**
     39 * DOC: DMA fences overview
     40 *
     41 * DMA fences, represented by &struct dma_fence, are the kernel internal
     42 * synchronization primitive for DMA operations like GPU rendering, video
     43 * encoding/decoding, or displaying buffers on a screen.
     44 *
     45 * A fence is initialized using dma_fence_init() and completed using
     46 * dma_fence_signal(). Fences are associated with a context, allocated through
     47 * dma_fence_context_alloc(), and all fences on the same context are
     48 * fully ordered.
     49 *
     50 * Since the purpose of fences is to facilitate cross-device and
     51 * cross-application synchronization, there are multiple ways to use one:
     52 *
     53 * - Individual fences can be exposed as a &sync_file, accessed as a file
     54 *   descriptor from userspace, created by calling sync_file_create(). This is
     55 *   called explicit fencing, since userspace passes around explicit
     56 *   synchronization points.
     57 *
     58 * - Some subsystems also have their own explicit fencing primitives, like
     59 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
     60 *   fence to be updated.
     61 *
     62 * - Then there's also implicit fencing, where the synchronization points are
     63 *   implicitly passed around as part of shared &dma_buf instances. Such
     64 *   implicit fences are stored in &struct dma_resv through the
     65 *   &dma_buf.resv pointer.
     66 */
     67
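/*
 * Illustrative sketch, not part of the original file: the typical fence
 * lifecycle described above. All example_* names are hypothetical
 * stand-ins for driver-provided state.
 */
#if 0	/* example only */
static struct dma_fence *example_submit(struct example_engine *engine)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	/* context allocated once via dma_fence_context_alloc() */
	dma_fence_init(fence, &example_fence_ops, &engine->fence_lock,
		       engine->fence_context, ++engine->fence_seqno);

	/* ... publish the fence and kick the hardware ... */
	return fence;
}

static void example_complete(struct dma_fence *fence)
{
	dma_fence_signal(fence);	/* unblocks waiters, runs callbacks */
	dma_fence_put(fence);		/* drop the submission reference */
}
#endif
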
     68/**
     69 * DOC: fence cross-driver contract
     70 *
     71 * Since &dma_fence provides a cross-driver contract, all drivers must follow the
     72 * same rules:
     73 *
     74 * * Fences must complete in a reasonable time. Fences which represent kernels
     75 *   and shaders submitted by userspace, which could run forever, must be backed
     76 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
     77 *   further command submission and force complete all in-flight fences, e.g.
     78 *   when the driver or hardware do not support gpu reset, or if the gpu reset
     79 *   failed for some reason. Ideally the driver supports gpu recovery which only
     80 *   affects the offending userspace context, and no other userspace
     81 *   submissions.
     82 *
     83 * * Drivers may have different ideas of what completion within a reasonable
     84 *   time means. Some hang recovery code uses a fixed timeout, others a mix
     85 *   between observing forward progress and increasingly strict timeouts.
     86 *   Drivers should not try to second guess timeout handling of fences from
     87 *   other drivers.
     88 *
     89 * * To ensure there's no deadlocks of dma_fence_wait() against other locks
     90 *   drivers should annotate all code required to reach dma_fence_signal(),
     91 *   which completes the fences, with dma_fence_begin_signalling() and
     92 *   dma_fence_end_signalling().
     93 *
     94 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
     95 *   This means any code required for fence completion cannot acquire a
     96 *   &dma_resv lock. Note that this also pulls in the entire established
     97 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
     98 *
     99 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
    100 *   callbacks. This means any code required for fence completion cannot
    101 *   allocate memory with GFP_KERNEL.
    102 *
    103 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
    104 *   respectively &mmu_interval_notifier callbacks. This means any code required
    105 * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
    106 *   Only GFP_ATOMIC is permissible, which might fail.
    107 *
    108 * Note that only GPU drivers have a reasonable excuse for both requiring
    109 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
    110 * track asynchronous compute work using &dma_fence. No driver outside of
    111 * drivers/gpu should ever call dma_fence_wait() in such contexts.
    112 */
    113
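/*
 * Illustrative sketch, not part of the original file: because the
 * completion path may be reached from shrinkers and mmu notifiers, any
 * memory it needs should be preallocated while GFP_KERNEL is still legal,
 * i.e. before the fence is published. Names are hypothetical.
 */
#if 0	/* example only */
static int example_submit_job(struct example_job *job)
{
	/* allowed here: the fence is not yet visible to other threads */
	job->done_msg = kzalloc(sizeof(*job->done_msg), GFP_KERNEL);
	if (!job->done_msg)
		return -ENOMEM;

	/*
	 * After this point the completion irq/worker only consumes
	 * job->done_msg and never allocates.
	 */
	example_publish_fence(job);
	return 0;
}
#endif
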
    114static const char *dma_fence_stub_get_name(struct dma_fence *fence)
    115{
    116        return "stub";
    117}
    118
    119static const struct dma_fence_ops dma_fence_stub_ops = {
    120	.get_driver_name = dma_fence_stub_get_name,
    121	.get_timeline_name = dma_fence_stub_get_name,
    122};
    123
    124/**
    125 * dma_fence_get_stub - return a signaled fence
    126 *
    127 * Return a stub fence which is already signaled. The fence's
    128 * timestamp corresponds to the first time after boot this
    129 * function is called.
    130 */
    131struct dma_fence *dma_fence_get_stub(void)
    132{
    133	spin_lock(&dma_fence_stub_lock);
    134	if (!dma_fence_stub.ops) {
    135		dma_fence_init(&dma_fence_stub,
    136			       &dma_fence_stub_ops,
    137			       &dma_fence_stub_lock,
    138			       0, 0);
    139		dma_fence_signal_locked(&dma_fence_stub);
    140	}
    141	spin_unlock(&dma_fence_stub_lock);
    142
    143	return dma_fence_get(&dma_fence_stub);
    144}
    145EXPORT_SYMBOL(dma_fence_get_stub);
    146
    147/**
    148 * dma_fence_allocate_private_stub - return a private, signaled fence
    149 *
    150 * Return a newly allocated and signaled stub fence.
    151 */
    152struct dma_fence *dma_fence_allocate_private_stub(void)
    153{
    154	struct dma_fence *fence;
    155
    156	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
    157	if (fence == NULL)
    158		return ERR_PTR(-ENOMEM);
    159
    160	dma_fence_init(fence,
    161		       &dma_fence_stub_ops,
    162		       &dma_fence_stub_lock,
    163		       0, 0);
    164	dma_fence_signal(fence);
    165
    166	return fence;
    167}
    168EXPORT_SYMBOL(dma_fence_allocate_private_stub);
    169
    170/**
    171 * dma_fence_context_alloc - allocate an array of fence contexts
    172 * @num: amount of contexts to allocate
    173 *
    174 * This function returns the first of @num newly allocated fence context
    175 * numbers.  The fence context is used for setting &dma_fence.context to a
    176 * unique number by passing the context to dma_fence_init().
    177 */
    178u64 dma_fence_context_alloc(unsigned num)
    179{
    180	WARN_ON(!num);
    181	return atomic64_fetch_add(num, &dma_fence_context_counter);
    182}
    183EXPORT_SYMBOL(dma_fence_context_alloc);
    184
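/*
 * Illustrative sketch, not part of the original file: a driver with
 * several independent engines would typically allocate one context per
 * engine in a single call. All example_* names are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_NUM_ENGINES 4

static u64 example_base_context;

static void example_init_fence_contexts(void)
{
	example_base_context = dma_fence_context_alloc(EXAMPLE_NUM_ENGINES);
}

static void example_fence_init(struct dma_fence *fence, spinlock_t *lock,
			       unsigned int engine, u64 seqno)
{
	dma_fence_init(fence, &example_fence_ops, lock,
		       example_base_context + engine, seqno);
}
#endif
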
    185/**
    186 * DOC: fence signalling annotation
    187 *
    188 * Proving correctness of all the kernel code around &dma_fence through code
    189 * review and testing is tricky for a few reasons:
    190 *
    191 * * It is a cross-driver contract, and therefore all drivers must follow the
    192 *   same rules for lock nesting order, calling contexts for various functions
    193 *   and anything else significant for in-kernel interfaces. But it is also
    194 *   impossible to test all drivers in a single machine, hence brute-force N vs.
    195 *   N testing of all combinations is impossible. Even just limiting to the
    196 *   possible combinations is infeasible.
    197 *
    198 * * There is an enormous amount of driver code involved. For render drivers
    199 *   there's the tail of command submission, after fences are published,
    200 *   scheduler code, interrupt and workers to process job completion,
    201 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
    202 * with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
    203 *   and &shrinker. For modesetting drivers there's the commit tail functions
    204 *   between when fences for an atomic modeset are published, and when the
    205 *   corresponding vblank completes, including any interrupt processing and
    206 *   related workers. Auditing all that code, across all drivers, is not
    207 *   feasible.
    208 *
    209 * * Due to how many other subsystems are involved and the locking hierarchies
    210 *   this pulls in there is extremely thin wiggle-room for driver-specific
    211 *   differences. &dma_fence interacts with almost all of the core memory
    212 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
    213 *   dma_resv_unlock(). On the other side it also interacts through all
    214 *   allocation sites through &mmu_notifier and &shrinker.
    215 *
    216 * Furthermore lockdep does not handle cross-release dependencies, which means
    217 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
    218 * at runtime with some quick testing. The simplest example is one thread
    219 * waiting on a &dma_fence while holding a lock::
    220 *
    221 *     lock(A);
    222 *     dma_fence_wait(B);
    223 *     unlock(A);
    224 *
    225 * while the other thread is stuck trying to acquire the same lock, which
    226 * prevents it from signalling the fence the previous thread is stuck waiting
    227 * on::
    228 *
    229 *     lock(A);
    230 *     unlock(A);
    231 *     dma_fence_signal(B);
    232 *
    233 * By manually annotating all code relevant to signalling a &dma_fence we can
    234 * teach lockdep about these dependencies, which also helps with the validation
    235 * headache since now lockdep can check all the rules for us::
    236 *
    237 *    cookie = dma_fence_begin_signalling();
    238 *    lock(A);
    239 *    unlock(A);
    240 *    dma_fence_signal(B);
    241 *    dma_fence_end_signalling(cookie);
    242 *
    243 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
    244 * annotate critical sections the following rules need to be observed:
    245 *
    246 * * All code necessary to complete a &dma_fence must be annotated, from the
    247 *   point where a fence is accessible to other threads, to the point where
    248 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
    249 *   and due to the very strict rules and many corner cases it is infeasible to
    250 *   catch these just with review or normal stress testing.
    251 *
    252 * * &struct dma_resv deserves a special note, since the readers are only
    253 *   protected by rcu. This means the signalling critical section starts as soon
    254 *   as the new fences are installed, even before dma_resv_unlock() is called.
    255 *
    256 * * The only exception are fast paths and opportunistic signalling code, which
    257 *   calls dma_fence_signal() purely as an optimization, but is not required to
    258 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
    259 *   which calls dma_fence_signal(), while the mandatory completion path goes
    260 *   through a hardware interrupt and possible job completion worker.
    261 *
    262 * * To aid composability of code, the annotations can be freely nested, as long
    263 *   as the overall locking hierarchy is consistent. The annotations also work
    264 *   both in interrupt and process context. Due to implementation details this
    265 *   requires that callers pass an opaque cookie from
    266 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
    267 *
    268 * * Validation against the cross driver contract is implemented by priming
    269 *   lockdep with the relevant hierarchy at boot-up. This means even just
    270 *   testing with a single device is enough to validate a driver, at least as
    271 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
    272 *   concerned.
    273 */
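
/*
 * Illustrative sketch, not part of the original file: a job-completion
 * worker annotated per the rules above, so lockdep sees every lock taken
 * between here and dma_fence_signal(). Names are hypothetical; the
 * work_struct API comes from <linux/workqueue.h>.
 */
#if 0	/* example only */
static void example_job_done_worker(struct work_struct *work)
{
	struct example_job *job = container_of(work, struct example_job,
					       done_work);
	bool cookie;

	cookie = dma_fence_begin_signalling();
	/* everything in here is part of the signalling critical section */
	example_retire_job(job);
	dma_fence_signal(job->fence);
	dma_fence_end_signalling(cookie);
}
#endif
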
    274#ifdef CONFIG_LOCKDEP
    275static struct lockdep_map dma_fence_lockdep_map = {
    276	.name = "dma_fence_map"
    277};
    278
    279/**
    280 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
    281 *
    282 * Drivers should use this to annotate the beginning of any code section
    283 * required to eventually complete &dma_fence by calling dma_fence_signal().
    284 *
    285 * The end of these critical sections is annotated with
    286 * dma_fence_end_signalling().
    287 *
    288 * Returns:
    289 *
    290 * Opaque cookie needed by the implementation, which needs to be passed to
    291 * dma_fence_end_signalling().
    292 */
    293bool dma_fence_begin_signalling(void)
    294{
    295	/* explicitly nesting ... */
    296	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
    297		return true;
    298
    299	/* rely on might_sleep check for soft/hardirq locks */
    300	if (in_atomic())
    301		return true;
    302
    303	/* ... and non-recursive readlock */
    304	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
    305
    306	return false;
    307}
    308EXPORT_SYMBOL(dma_fence_begin_signalling);
    309
    310/**
    311 * dma_fence_end_signalling - end a critical DMA fence signalling section
    312 * @cookie: opaque cookie from dma_fence_begin_signalling()
    313 *
    314 * Closes a critical section annotation opened by dma_fence_begin_signalling().
    315 */
    316void dma_fence_end_signalling(bool cookie)
    317{
    318	if (cookie)
    319		return;
    320
    321	lock_release(&dma_fence_lockdep_map, _RET_IP_);
    322}
    323EXPORT_SYMBOL(dma_fence_end_signalling);
    324
    325void __dma_fence_might_wait(void)
    326{
    327	bool tmp;
    328
    329	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
    330	if (tmp)
    331		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
    332	lock_map_acquire(&dma_fence_lockdep_map);
    333	lock_map_release(&dma_fence_lockdep_map);
    334	if (tmp)
    335		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
    336}
    337#endif
    338
    339
    340/**
    341 * dma_fence_signal_timestamp_locked - signal completion of a fence
    342 * @fence: the fence to signal
    343 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
    344 *
    345 * Signal completion for software callbacks on a fence, this will unblock
    346 * dma_fence_wait() calls and run all the callbacks added with
    347 * dma_fence_add_callback(). Can be called multiple times, but since a fence
    348 * can only go from the unsignaled to the signaled state and not back, it will
    349 * only be effective the first time. Set the timestamp provided as the fence
    350 * signal timestamp.
    351 *
    352 * Unlike dma_fence_signal_timestamp(), this function must be called with
    353 * &dma_fence.lock held.
    354 *
    355 * Returns 0 on success and a negative error value when @fence has been
    356 * signalled already.
    357 */
    358int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
    359				      ktime_t timestamp)
    360{
    361	struct dma_fence_cb *cur, *tmp;
    362	struct list_head cb_list;
    363
    364	lockdep_assert_held(fence->lock);
    365
    366	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
    367				      &fence->flags)))
    368		return -EINVAL;
    369
    370	/* Stash the cb_list before replacing it with the timestamp */
    371	list_replace(&fence->cb_list, &cb_list);
    372
    373	fence->timestamp = timestamp;
    374	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
    375	trace_dma_fence_signaled(fence);
    376
    377	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
    378		INIT_LIST_HEAD(&cur->node);
    379		cur->func(fence, cur);
    380	}
    381
    382	return 0;
    383}
    384EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
    385
    386/**
    387 * dma_fence_signal_timestamp - signal completion of a fence
    388 * @fence: the fence to signal
    389 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
    390 *
    391 * Signal completion for software callbacks on a fence, this will unblock
    392 * dma_fence_wait() calls and run all the callbacks added with
    393 * dma_fence_add_callback(). Can be called multiple times, but since a fence
    394 * can only go from the unsignaled to the signaled state and not back, it will
    395 * only be effective the first time. Set the timestamp provided as the fence
    396 * signal timestamp.
    397 *
    398 * Returns 0 on success and a negative error value when @fence has been
    399 * signalled already.
    400 */
    401int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
    402{
    403	unsigned long flags;
    404	int ret;
    405
    406	if (!fence)
    407		return -EINVAL;
    408
    409	spin_lock_irqsave(fence->lock, flags);
    410	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
    411	spin_unlock_irqrestore(fence->lock, flags);
    412
    413	return ret;
    414}
    415EXPORT_SYMBOL(dma_fence_signal_timestamp);
    416
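/*
 * Illustrative sketch, not part of the original file: signalling with a
 * timestamp sampled by hardware. example_hw_ts_to_ktime() is hypothetical;
 * converting a raw hardware counter to a CLOCK_MONOTONIC ktime_t is
 * driver-specific.
 */
#if 0	/* example only */
static void example_complete_with_hw_ts(struct example_job *job, u64 hw_ts)
{
	dma_fence_signal_timestamp(job->fence,
				   example_hw_ts_to_ktime(hw_ts));
}
#endif
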
    417/**
    418 * dma_fence_signal_locked - signal completion of a fence
    419 * @fence: the fence to signal
    420 *
    421 * Signal completion for software callbacks on a fence, this will unblock
    422 * dma_fence_wait() calls and run all the callbacks added with
    423 * dma_fence_add_callback(). Can be called multiple times, but since a fence
    424 * can only go from the unsignaled to the signaled state and not back, it will
    425 * only be effective the first time.
    426 *
    427 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
    428 * held.
    429 *
    430 * Returns 0 on success and a negative error value when @fence has been
    431 * signalled already.
    432 */
    433int dma_fence_signal_locked(struct dma_fence *fence)
    434{
    435	return dma_fence_signal_timestamp_locked(fence, ktime_get());
    436}
    437EXPORT_SYMBOL(dma_fence_signal_locked);
    438
    439/**
    440 * dma_fence_signal - signal completion of a fence
    441 * @fence: the fence to signal
    442 *
    443 * Signal completion for software callbacks on a fence, this will unblock
    444 * dma_fence_wait() calls and run all the callbacks added with
    445 * dma_fence_add_callback(). Can be called multiple times, but since a fence
    446 * can only go from the unsignaled to the signaled state and not back, it will
    447 * only be effective the first time.
    448 *
    449 * Returns 0 on success and a negative error value when @fence has been
    450 * signalled already.
    451 */
    452int dma_fence_signal(struct dma_fence *fence)
    453{
    454	unsigned long flags;
    455	int ret;
    456	bool tmp;
    457
    458	if (!fence)
    459		return -EINVAL;
    460
    461	tmp = dma_fence_begin_signalling();
    462
    463	spin_lock_irqsave(fence->lock, flags);
    464	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
    465	spin_unlock_irqrestore(fence->lock, flags);
    466
    467	dma_fence_end_signalling(tmp);
    468
    469	return ret;
    470}
    471EXPORT_SYMBOL(dma_fence_signal);
    472
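/*
 * Illustrative sketch, not part of the original file: a mandatory
 * completion path, here an interrupt handler. example_pop_done_fence()
 * is a hypothetical helper returning the oldest completed fence;
 * irqreturn_t comes from <linux/interrupt.h>.
 */
#if 0	/* example only */
static irqreturn_t example_done_irq(int irq, void *data)
{
	struct example_engine *engine = data;
	struct dma_fence *fence;

	while ((fence = example_pop_done_fence(engine))) {
		dma_fence_signal(fence);
		dma_fence_put(fence);	/* drop the submission reference */
	}
	return IRQ_HANDLED;
}
#endif
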
    473/**
    474 * dma_fence_wait_timeout - sleep until the fence gets signaled
    475 * or until timeout elapses
    476 * @fence: the fence to wait on
    477 * @intr: if true, do an interruptible wait
    478 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
    479 *
    480 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
    481 * remaining timeout in jiffies on success. Other error values may be
    482 * returned by custom implementations.
    483 *
    484 * Performs a synchronous wait on this fence. It is assumed the caller
    485 * directly or indirectly (buf-mgr between reservation and committing)
    486 * holds a reference to the fence, otherwise the fence might be
    487 * freed before return, resulting in undefined behavior.
    488 *
    489 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
    490 */
    491signed long
    492dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
    493{
    494	signed long ret;
    495
    496	if (WARN_ON(timeout < 0))
    497		return -EINVAL;
    498
    499	might_sleep();
    500
    501	__dma_fence_might_wait();
    502
    503	trace_dma_fence_wait_start(fence);
    504	if (fence->ops->wait)
    505		ret = fence->ops->wait(fence, intr, timeout);
    506	else
    507		ret = dma_fence_default_wait(fence, intr, timeout);
    508	trace_dma_fence_wait_end(fence);
    509	return ret;
    510}
    511EXPORT_SYMBOL(dma_fence_wait_timeout);
    512
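/*
 * Illustrative sketch, not part of the original file: mapping the
 * three-way return convention of dma_fence_wait_timeout() onto plain
 * error codes. The 100ms budget is arbitrary.
 */
#if 0	/* example only */
static int example_wait(struct dma_fence *fence)
{
	signed long ret;

	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* -ERESTARTSYS or driver error */
	if (ret == 0)
		return -ETIMEDOUT;	/* wait timed out */
	return 0;			/* signaled; ret = jiffies left */
}
#endif
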
    513/**
    514 * dma_fence_release - default release function for fences
    515 * @kref: &dma_fence.refcount
    516 *
    517 * This is the default release function for &dma_fence. Drivers shouldn't call
    518 * this directly, but instead call dma_fence_put().
    519 */
    520void dma_fence_release(struct kref *kref)
    521{
    522	struct dma_fence *fence =
    523		container_of(kref, struct dma_fence, refcount);
    524
    525	trace_dma_fence_destroy(fence);
    526
    527	if (WARN(!list_empty(&fence->cb_list) &&
    528		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
    529		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
    530		 fence->ops->get_driver_name(fence),
    531		 fence->ops->get_timeline_name(fence),
    532		 fence->context, fence->seqno)) {
    533		unsigned long flags;
    534
    535		/*
    536		 * Failed to signal before release, likely a refcounting issue.
    537		 *
    538		 * This should never happen, but if it does make sure that we
    539		 * don't leave chains dangling. We set the error flag first
    540		 * so that the callbacks know this signal is due to an error.
    541		 */
    542		spin_lock_irqsave(fence->lock, flags);
    543		fence->error = -EDEADLK;
    544		dma_fence_signal_locked(fence);
    545		spin_unlock_irqrestore(fence->lock, flags);
    546	}
    547
    548	if (fence->ops->release)
    549		fence->ops->release(fence);
    550	else
    551		dma_fence_free(fence);
    552}
    553EXPORT_SYMBOL(dma_fence_release);
    554
    555/**
    556 * dma_fence_free - default release function for &dma_fence.
    557 * @fence: fence to release
    558 *
    559 * This is the default implementation for &dma_fence_ops.release. It calls
    560 * kfree_rcu() on @fence.
    561 */
    562void dma_fence_free(struct dma_fence *fence)
    563{
    564	kfree_rcu(fence, rcu);
    565}
    566EXPORT_SYMBOL(dma_fence_free);
    567
    568static bool __dma_fence_enable_signaling(struct dma_fence *fence)
    569{
    570	bool was_set;
    571
    572	lockdep_assert_held(fence->lock);
    573
    574	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
    575				   &fence->flags);
    576
    577	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
    578		return false;
    579
    580	if (!was_set && fence->ops->enable_signaling) {
    581		trace_dma_fence_enable_signal(fence);
    582
    583		if (!fence->ops->enable_signaling(fence)) {
    584			dma_fence_signal_locked(fence);
    585			return false;
    586		}
    587	}
    588
    589	return true;
    590}
    591
    592/**
    593 * dma_fence_enable_sw_signaling - enable signaling on fence
    594 * @fence: the fence to enable
    595 *
    596 * This will request sw signaling to be enabled, to make the fence
    597 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
    598 * internally.
    599 */
    600void dma_fence_enable_sw_signaling(struct dma_fence *fence)
    601{
    602	unsigned long flags;
    603
    604	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
    605		return;
    606
    607	spin_lock_irqsave(fence->lock, flags);
    608	__dma_fence_enable_signaling(fence);
    609	spin_unlock_irqrestore(fence->lock, flags);
    610}
    611EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
    612
    613/**
    614 * dma_fence_add_callback - add a callback to be called when the fence
    615 * is signaled
    616 * @fence: the fence to wait on
    617 * @cb: the callback to register
    618 * @func: the function to call
    619 *
    620 * Add a software callback to the fence. The caller should keep a reference to
    621 * the fence.
    622 *
    623 * @cb will be initialized by dma_fence_add_callback(), no initialization
    624 * by the caller is required. Any number of callbacks can be registered
    625 * to a fence, but a callback can only be registered to one fence at a time.
    626 *
    627 * If fence is already signaled, this function will return -ENOENT (and
    628 * *not* call the callback).
    629 *
    630 * Note that the callback can be called from an atomic context or irq context.
    631 *
    632 * Returns 0 in case of success, -ENOENT if the fence is already signaled
    633 * and -EINVAL in case of error.
    634 */
    635int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    636			   dma_fence_func_t func)
    637{
    638	unsigned long flags;
    639	int ret = 0;
    640
    641	if (WARN_ON(!fence || !func))
    642		return -EINVAL;
    643
    644	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
    645		INIT_LIST_HEAD(&cb->node);
    646		return -ENOENT;
    647	}
    648
    649	spin_lock_irqsave(fence->lock, flags);
    650
    651	if (__dma_fence_enable_signaling(fence)) {
    652		cb->func = func;
    653		list_add_tail(&cb->node, &fence->cb_list);
    654	} else {
    655		INIT_LIST_HEAD(&cb->node);
    656		ret = -ENOENT;
    657	}
    658
    659	spin_unlock_irqrestore(fence->lock, flags);
    660
    661	return ret;
    662}
    663EXPORT_SYMBOL(dma_fence_add_callback);
    664
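/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * of embedding the &dma_fence_cb in a larger structure and recovering it
 * with container_of(), mirroring default_wait_cb below. Names are
 * hypothetical; the completion API comes from <linux/completion.h>.
 */
#if 0	/* example only */
struct example_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_waiter *w = container_of(cb, struct example_waiter, cb);

	complete(&w->done);	/* may run in irq context */
}

static void example_wait_via_cb(struct dma_fence *fence)
{
	struct example_waiter w;

	init_completion(&w.done);
	if (dma_fence_add_callback(fence, &w.cb, example_fence_cb))
		return;	/* -ENOENT: already signaled, cb never runs */
	wait_for_completion(&w.done);
}
#endif
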
    665/**
    666 * dma_fence_get_status - returns the status upon completion
    667 * @fence: the dma_fence to query
    668 *
    669 * This wraps dma_fence_get_status_locked() to return the error status
    670 * condition on a signaled fence. See dma_fence_get_status_locked() for more
    671 * details.
    672 *
    673 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
    674 * been signaled without an error condition, or a negative error code
    675 * if the fence has been completed in err.
    676 */
    677int dma_fence_get_status(struct dma_fence *fence)
    678{
    679	unsigned long flags;
    680	int status;
    681
    682	spin_lock_irqsave(fence->lock, flags);
    683	status = dma_fence_get_status_locked(fence);
    684	spin_unlock_irqrestore(fence->lock, flags);
    685
    686	return status;
    687}
    688EXPORT_SYMBOL(dma_fence_get_status);
    689
    690/**
    691 * dma_fence_remove_callback - remove a callback from the signaling list
    692 * @fence: the fence to wait on
    693 * @cb: the callback to remove
    694 *
    695 * Remove a previously queued callback from the fence. This function returns
    696 * true if the callback is successfully removed, or false if the fence has
    697 * already been signaled.
    698 *
    699 * *WARNING*:
    700 * Cancelling a callback should only be done if you really know what you're
    701 * doing, since deadlocks and race conditions could occur all too easily. For
    702 * this reason, it should only ever be done on hardware lockup recovery,
    703 * with a reference held to the fence.
    704 *
    705 * Behaviour is undefined if @cb has not been added to @fence using
    706 * dma_fence_add_callback() beforehand.
    707 */
    708bool
    709dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
    710{
    711	unsigned long flags;
    712	bool ret;
    713
    714	spin_lock_irqsave(fence->lock, flags);
    715
    716	ret = !list_empty(&cb->node);
    717	if (ret)
    718		list_del_init(&cb->node);
    719
    720	spin_unlock_irqrestore(fence->lock, flags);
    721
    722	return ret;
    723}
    724EXPORT_SYMBOL(dma_fence_remove_callback);
    725
    726struct default_wait_cb {
    727	struct dma_fence_cb base;
    728	struct task_struct *task;
    729};
    730
    731static void
    732dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
    733{
    734	struct default_wait_cb *wait =
    735		container_of(cb, struct default_wait_cb, base);
    736
    737	wake_up_state(wait->task, TASK_NORMAL);
    738}
    739
    740/**
    741 * dma_fence_default_wait - default sleep until the fence gets signaled
    742 * or until timeout elapses
    743 * @fence: the fence to wait on
    744 * @intr: if true, do an interruptible wait
    745 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
    746 *
    747 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
    748 * remaining timeout in jiffies on success. If @timeout is zero, one is
    749 * returned if the fence is already signaled, for consistency with other
    750 * functions taking a jiffies timeout.
    751 */
    752signed long
    753dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
    754{
    755	struct default_wait_cb cb;
    756	unsigned long flags;
    757	signed long ret = timeout ? timeout : 1;
    758
    759	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
    760		return ret;
    761
    762	spin_lock_irqsave(fence->lock, flags);
    763
    764	if (intr && signal_pending(current)) {
    765		ret = -ERESTARTSYS;
    766		goto out;
    767	}
    768
    769	if (!__dma_fence_enable_signaling(fence))
    770		goto out;
    771
    772	if (!timeout) {
    773		ret = 0;
    774		goto out;
    775	}
    776
    777	cb.base.func = dma_fence_default_wait_cb;
    778	cb.task = current;
    779	list_add(&cb.base.node, &fence->cb_list);
    780
    781	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
    782		if (intr)
    783			__set_current_state(TASK_INTERRUPTIBLE);
    784		else
    785			__set_current_state(TASK_UNINTERRUPTIBLE);
    786		spin_unlock_irqrestore(fence->lock, flags);
    787
    788		ret = schedule_timeout(ret);
    789
    790		spin_lock_irqsave(fence->lock, flags);
    791		if (ret > 0 && intr && signal_pending(current))
    792			ret = -ERESTARTSYS;
    793	}
    794
    795	if (!list_empty(&cb.base.node))
    796		list_del(&cb.base.node);
    797	__set_current_state(TASK_RUNNING);
    798
    799out:
    800	spin_unlock_irqrestore(fence->lock, flags);
    801	return ret;
    802}
    803EXPORT_SYMBOL(dma_fence_default_wait);
    804
    805static bool
    806dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
    807			    uint32_t *idx)
    808{
    809	int i;
    810
    811	for (i = 0; i < count; ++i) {
    812		struct dma_fence *fence = fences[i];
    813		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
    814			if (idx)
    815				*idx = i;
    816			return true;
    817		}
    818	}
    819	return false;
    820}
    821
    822/**
    823 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
    824 * or until timeout elapses
    825 * @fences: array of fences to wait on
    826 * @count: number of fences to wait on
    827 * @intr: if true, do an interruptible wait
    828 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
    829 * @idx: used to store the first signaled fence index, meaningful only on
    830 *	positive return
    831 *
    832 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
    833 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
    834 * on success.
    835 *
    836 * Synchronously waits for the first fence in the array to be signaled. The
    837 * caller needs to hold a reference to all fences in the array, otherwise a
    838 * fence might be freed before return, resulting in undefined behavior.
    839 *
    840 * See also dma_fence_wait() and dma_fence_wait_timeout().
    841 */
    842signed long
    843dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
    844			   bool intr, signed long timeout, uint32_t *idx)
    845{
    846	struct default_wait_cb *cb;
    847	signed long ret = timeout;
    848	unsigned i;
    849
    850	if (WARN_ON(!fences || !count || timeout < 0))
    851		return -EINVAL;
    852
    853	if (timeout == 0) {
    854		for (i = 0; i < count; ++i)
    855			if (dma_fence_is_signaled(fences[i])) {
    856				if (idx)
    857					*idx = i;
    858				return 1;
    859			}
    860
    861		return 0;
    862	}
    863
    864	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
    865	if (cb == NULL) {
    866		ret = -ENOMEM;
    867		goto err_free_cb;
    868	}
    869
    870	for (i = 0; i < count; ++i) {
    871		struct dma_fence *fence = fences[i];
    872
    873		cb[i].task = current;
    874		if (dma_fence_add_callback(fence, &cb[i].base,
    875					   dma_fence_default_wait_cb)) {
    876			/* This fence is already signaled */
    877			if (idx)
    878				*idx = i;
    879			goto fence_rm_cb;
    880		}
    881	}
    882
    883	while (ret > 0) {
    884		if (intr)
    885			set_current_state(TASK_INTERRUPTIBLE);
    886		else
    887			set_current_state(TASK_UNINTERRUPTIBLE);
    888
    889		if (dma_fence_test_signaled_any(fences, count, idx))
    890			break;
    891
    892		ret = schedule_timeout(ret);
    893
    894		if (ret > 0 && intr && signal_pending(current))
    895			ret = -ERESTARTSYS;
    896	}
    897
    898	__set_current_state(TASK_RUNNING);
    899
    900fence_rm_cb:
    901	while (i-- > 0)
    902		dma_fence_remove_callback(fences[i], &cb[i].base);
    903
    904err_free_cb:
    905	kfree(cb);
    906
    907	return ret;
    908}
    909EXPORT_SYMBOL(dma_fence_wait_any_timeout);
    910
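/*
 * Illustrative sketch, not part of the original file: waiting for the
 * first of several fences and learning which one fired via @idx. The
 * 500ms budget is arbitrary.
 */
#if 0	/* example only */
static int example_wait_first(struct dma_fence **fences, uint32_t count,
			      uint32_t *first)
{
	signed long ret;

	ret = dma_fence_wait_any_timeout(fences, count, true,
					 msecs_to_jiffies(500), first);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;	/* fences[*first] signaled first */
}
#endif
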
    911/**
    912 * dma_fence_describe - Dump fence description into seq_file
    913 * @fence: the fence to describe
    914 * @seq: the seq_file to put the textual description into
    915 *
    916 * Dump a textual description of the fence and its state into the seq_file.
    917 */
    918void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
    919{
    920	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
    921		   fence->ops->get_driver_name(fence),
    922		   fence->ops->get_timeline_name(fence), fence->seqno,
    923		   dma_fence_is_signaled(fence) ? "" : "un");
    924}
    925EXPORT_SYMBOL(dma_fence_describe);
    926
    927/**
    928 * dma_fence_init - Initialize a custom fence.
    929 * @fence: the fence to initialize
    930 * @ops: the dma_fence_ops for operations on this fence
    931 * @lock: the irqsafe spinlock to use for locking this fence
    932 * @context: the execution context this fence is run on
    933 * @seqno: a linear increasing sequence number for this context
    934 *
    935 * Initializes an allocated fence. The caller doesn't have to keep its
    936 * refcount after committing with this fence, but will need to hold a
    937 * refcount again if &dma_fence_ops.enable_signaling gets called.
    938 *
    939 * context and seqno are used for easy comparison between fences, allowing
    940 * one to check which fence is later by simply using dma_fence_later().
    941 */
    942void
    943dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    944	       spinlock_t *lock, u64 context, u64 seqno)
    945{
    946	BUG_ON(!lock);
    947	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
    948
    949	kref_init(&fence->refcount);
    950	fence->ops = ops;
    951	INIT_LIST_HEAD(&fence->cb_list);
    952	fence->lock = lock;
    953	fence->context = context;
    954	fence->seqno = seqno;
    955	fence->flags = 0UL;
    956	fence->error = 0;
    957
    958	trace_dma_fence_init(fence);
    959}
    960EXPORT_SYMBOL(dma_fence_init);
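
/*
 * Illustrative sketch, not part of the original file: the minimal
 * &dma_fence_ops that dma_fence_init() will accept. Only the two name
 * callbacks are mandatory (see the BUG_ON above); everything else has
 * sane defaults. Names are hypothetical.
 */
#if 0	/* example only */
static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
};
#endif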