cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dma-fence-chain.c (6982B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * fence-chain: chain fences together in a timeline
      4 *
      5 * Copyright (C) 2018 Advanced Micro Devices, Inc.
      6 * Authors:
      7 *	Christian König <christian.koenig@amd.com>
      8 */
      9
     10#include <linux/dma-fence-chain.h>
     11
     12static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
     13
     14/**
     15 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
     16 * @chain: chain node to get the previous node from
     17 *
     18 * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
     19 * chain node.
     20 */
     21static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
     22{
     23	struct dma_fence *prev;
     24
     25	rcu_read_lock();
     26	prev = dma_fence_get_rcu_safe(&chain->prev);
     27	rcu_read_unlock();
     28	return prev;
     29}
     30
/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 *
 * Drops the reference on @fence and returns a new reference on the previous
 * node (or NULL at the end of the chain).
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		/* Not a chain node, nothing to walk to. */
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			/* Stop at the first chain node whose contained fence
			 * hasn't signaled yet; signaled nodes are skipped by
			 * relinking to their predecessor below.
			 */
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			/* Plain (non-chain) fence terminating the chain. */
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		/* Lockless unlink of the signaled node: only drop the old
		 * reference if we were the ones who swapped in the
		 * replacement; if somebody else raced us, drop the now
		 * unused replacement reference instead.
		 */
		tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
			      prev, replacement);
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
     78
/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	/* seqno 0 requests no particular point in the timeline: no-op. */
	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	/* Walk back through the chain until just before the node which
	 * leaves the chain's fence context or drops below @seqno; the walk
	 * leaves a reference on *pfence for the caller.
	 */
	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
    111
    112static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
    113{
    114        return "dma_fence_chain";
    115}
    116
    117static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
    118{
    119        return "unbound";
    120}
    121
/* Deferred work queued by dma_fence_chain_cb: either re-arm the signaling
 * callback on the next unsignaled fence in the chain, or signal the chain
 * node when none is left.
 */
static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	/* Balances the dma_fence_get() in dma_fence_chain_enable_signaling()
	 * that kept the node alive while the callback was outstanding.
	 */
	dma_fence_put(&chain->base);
}
    134
/* Fence callback installed by dma_fence_chain_enable_signaling: when the
 * watched fence @f signals, defer further chain processing to irq_work
 * context.
 */
static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	init_irq_work(&chain->work, dma_fence_chain_irq_work);
	irq_work_queue(&chain->work);
	/* Drops the reference taken on @f before the callback was added in
	 * dma_fence_chain_enable_signaling().
	 */
	dma_fence_put(f);
}
    144
/* dma_fence_ops::enable_signaling: install dma_fence_chain_cb on the first
 * unsignaled fence found while walking the chain. Returns true if a callback
 * was installed, false if every fence in the chain had already signaled.
 */
static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	/* Keep the head node alive while a callback may be outstanding;
	 * dropped in dma_fence_chain_irq_work (via the callback) or below
	 * when no callback gets installed.
	 */
	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		/* Reference on f is dropped in dma_fence_chain_cb on
		 * success, or right below if the fence already signaled.
		 */
		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}
    163
/* dma_fence_ops::signaled: a chain node counts as signaled only when every
 * fence reachable through the chain has signaled.
 */
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		if (!dma_fence_is_signaled(f)) {
			/* Drop the walk's reference before bailing out. */
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}
    177
/* dma_fence_ops::release: free a chain node, iteratively unlinking and
 * releasing predecessor nodes we hold the last reference to.
 */
static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		/* Somebody else still holds a reference to prev, so stop
		 * unlinking here and let their final put release it.
		 */
		if (kref_read(&prev->refcount) > 1)
		       break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	/* Releases the node the loop stopped at (prev may be NULL, in which
	 * case dma_fence_put is a no-op).
	 */
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}
    208
/* Fence ops shared by all chain nodes; exported so other code can identify
 * chain fences (see to_dma_fence_chain/dma_fence_is_chain).
 */
const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
    218
/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 *
 * Note: @prev and @fence are stored in the node without taking additional
 * references, so the node consumes the caller's references to them.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		/* Previous node is missing, not a chain, or its seqno is not
		 * older than ours: start a fresh timeline context.
		 */
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);

	/*
	 * Chaining dma_fence_chain container together is only allowed through
	 * the prev fence and not through the contained fence.
	 *
	 * The correct way of handling this is to flatten out the fence
	 * structure into a dma_fence_array by the caller instead.
	 */
	WARN_ON(dma_fence_is_chain(fence));
}
EXPORT_SYMBOL(dma_fence_chain_init);