cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dmaengine.h (5350B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}
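/*
 * Illustrative sketch of where a driver would typically call
 * dma_cookie_init(), e.g. while wiring up its channels in probe().
 * The foo_* names are hypothetical driver state, not defined here.
 *
 *	fc->chan.device = &foo->ddev;
 *	dma_cookie_init(&fc->chan);
 *	list_add_tail(&fc->chan.device_node, &foo->ddev.channels);
 */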

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}
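/*
 * Illustrative sketch of the usual tx_submit() pattern, where the channel
 * lock satisfies the locking requirement noted above. The foo_* types,
 * lock and pending list are hypothetical driver state, not defined here.
 *
 *	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
 *	{
 *		struct foo_chan *fc = to_foo_chan(tx->chan);
 *		struct foo_desc *fd = to_foo_desc(tx);
 *		unsigned long flags;
 *		dma_cookie_t cookie;
 *
 *		spin_lock_irqsave(&fc->lock, flags);
 *		cookie = dma_cookie_assign(tx);
 *		list_add_tail(&fd->node, &fc->pending);
 *		spin_unlock_irqrestore(&fc->lock, flags);
 *
 *		return cookie;
 *	}
 */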

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker.  Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}
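/*
 * Illustrative sketch of the completion side, typically run from the
 * driver's interrupt handler or tasklet with the channel lock held.
 * The foo_chan/foo_desc names and the active list are hypothetical.
 *
 *	spin_lock(&fc->lock);
 *	fd = list_first_entry(&fc->active, struct foo_desc, node);
 *	dma_cookie_complete(&fd->tx);
 *	list_del(&fd->node);
 *	spin_unlock(&fc->lock);
 */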

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.  No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
		state->in_flight_bytes = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
					   u32 in_flight_bytes)
{
	if (state)
		state->in_flight_bytes = in_flight_bytes;
}
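/*
 * Illustrative sketch of how a driver's device_tx_status() hook commonly
 * combines dma_cookie_status() with dma_set_residue(). The foo_* names,
 * including the foo_get_residue() helper, are hypothetical.
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *					     dma_cookie_t cookie,
 *					     struct dma_tx_state *txstate)
 *	{
 *		enum dma_status status;
 *
 *		status = dma_cookie_status(chan, cookie, txstate);
 *		if (status == DMA_COMPLETE || !txstate)
 *			return status;
 *
 *		dma_set_residue(txstate, foo_get_residue(chan, cookie));
 *		return status;
 *	}
 */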

struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed in cb struct with what's available in the passed in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy_result = {
		.result = DMA_TRANS_NOERROR,
		.residue = 0
	};

	if (cb->callback_result) {
		if (!result)
			result = &dummy_result;
		cb->callback_result(cb->callback_param, result);
	} else if (cb->callback) {
		cb->callback(cb->callback_param);
	}
}

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 * 					then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
				   const struct dmaengine_result *result)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx, &cb);
	dmaengine_desc_callback_invoke(&cb, result);
}
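/*
 * Illustrative sketch of the completion pattern these helpers support:
 * snapshot the callback under the channel lock, then invoke it with the
 * lock dropped so the callback may resubmit work. The fc/fd names and
 * their lock are hypothetical driver state.
 *
 *	struct dmaengine_desc_callback cb;
 *	struct dmaengine_result res = {
 *		.result = DMA_TRANS_NOERROR,
 *		.residue = 0,
 *	};
 *
 *	spin_lock_irqsave(&fc->lock, flags);
 *	dma_cookie_complete(&fd->tx);
 *	dmaengine_desc_get_callback(&fd->tx, &cb);
 *	spin_unlock_irqrestore(&fc->lock, flags);
 *
 *	dmaengine_desc_callback_invoke(&cb, &res);
 */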

/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return a bool indicating whether the callback in cb is valid.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback || cb->callback_result;
}

struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif