cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

async_tx.h (6860B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006, Intel Corporation.
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* on architectures without dma-mapping capabilities we need to ensure
 * that the asynchronous path compiles away
 */
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif

/**
 * dma_chan_ref - object used to manage dma channels received from the
 *   dmaengine core.
 * @chan - the channel being tracked
 * @node - node for the channel to be placed on async_tx_master_list
 * @rcu - for list_del_rcu
 * @count - number of times this channel is listed in the pool
 *	(for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};

/**
 * async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 * destination address is not a source.  The asynchronous case handles this
 * implicitly, the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas in the asynchronous case it must be
 * listed as a source.  The destination address must be the first address in
 * the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 * chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
 * input data.  Required for the read-modify-write (rmw) case.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
	ASYNC_TX_ACK		 = (1 << 2),
	ASYNC_TX_FENCE		 = (1 << 3),
	ASYNC_TX_PQ_XOR_DST	 = (1 << 4),
};
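
/*
 * Example (illustrative sketch; everything other than the async_tx API
 * itself is hypothetical): an in-place xor, where the destination page is
 * also a source, combines ASYNC_TX_XOR_DROP_DST with ASYNC_TX_ACK since
 * no later operation will depend on this descriptor.  Per the rule above,
 * the destination must be src_list[0].
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[SRC_CNT];
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
 *			  NULL, NULL, NULL, addr_conv);
 *	tx = async_xor(srcs[0], srcs, 0, SRC_CNT, PAGE_SIZE, &submit);
 */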

/**
 * struct async_submit_ctl - async_tx submission/completion modifiers
 * @flags: submission modifiers
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 */
struct async_submit_ctl {
	enum async_tx_flags flags;
	struct dma_async_tx_descriptor *depend_tx;
	dma_async_tx_callback cb_fn;
	void *cb_param;
	void *scribble;
};

#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
#define async_tx_issue_pending_all dma_issue_pending_all

/**
 * async_tx_issue_pending - send pending descriptor to the hardware channel
 * @tx: descriptor handle to retrieve hardware context
 *
 * Note: any dependent operations will have already been issued by
 * async_tx_channel_switch, or (in the case of no channel switch) will
 * be already pending on this channel.
 */
static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	if (likely(tx)) {
		struct dma_chan *chan = tx->chan;
		struct dma_device *dma = chan->device;

		dma->device_issue_pending(chan);
	}
}
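
/*
 * Example (illustrative sketch; 'dest', 'src' and 'submit' are
 * hypothetical): after submitting the last descriptor in a chain, the
 * caller may kick just that descriptor's channel instead of every
 * registered channel:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);
 *	async_tx_issue_pending(tx);
 */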
#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	 __async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
static inline void async_tx_issue_pending_all(void)
{
	do { } while (0);
}

static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	do { } while (0);
}

static inline struct dma_chan *
async_tx_find_channel(struct async_submit_ctl *submit,
		      enum dma_transaction_type tx_type, struct page **dst,
		      int dst_count, struct page **src, int src_count,
		      size_t len)
{
	return NULL;
}
#endif

/**
 * async_tx_sync_epilog - actions to take if an operation is run synchronously
 * @submit: submission/completion modifiers; if a completion callback was
 * provided in @submit->cb_fn it is invoked here with @submit->cb_param
 */
static inline void
async_tx_sync_epilog(struct async_submit_ctl *submit)
{
	if (submit->cb_fn)
		submit->cb_fn(submit->cb_param);
}

typedef union {
	unsigned long addr;
	struct page *page;
	dma_addr_t dma;
} addr_conv_t;

static inline void
init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
		  struct dma_async_tx_descriptor *tx,
		  dma_async_tx_callback cb_fn, void *cb_param,
		  addr_conv_t *scribble)
{
	args->flags = flags;
	args->depend_tx = tx;
	args->cb_fn = cb_fn;
	args->cb_param = cb_param;
	args->scribble = scribble;
}
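
/*
 * Example (illustrative sketch; 'prev_tx', the callback and the page
 * pointers are hypothetical): initialize a submission context so the xor
 * depends on a previous operation, is fenced because its result feeds the
 * next operation in the chain, and reports completion via a callback.
 * The addr_conv scribble space is caller-provided room for dma/page
 * address conversions.
 *
 *	addr_conv_t addr_conv[NDISKS];
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_FENCE, prev_tx,
 *			  done_fn, done_param, addr_conv);
 *	tx = async_xor(dest, srcs, 0, NDISKS, PAGE_SIZE, &submit);
 */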

void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_offs(struct page *dest, unsigned int offset,
		struct page **src_list, unsigned int *src_offset,
		int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
		struct page **src_list, unsigned int *src_offset,
		int src_cnt, size_t len, enum sum_check_flags *result,
		struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
		   size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
			struct page **ptrs, unsigned int *offs,
			struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
			struct page **ptrs, unsigned int *offs,
			struct async_submit_ctl *submit);

void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
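
/*
 * End-to-end example (illustrative sketch, loosely following
 * Documentation/crypto/async-tx-api.rst; all identifiers other than the
 * async_tx API are hypothetical): xor a set of data blocks, chain a P/Q
 * syndrome generation off the result (blocks[] carries the data pages
 * plus P and Q in its last two slots), kick the engines, then wait.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[NDISKS + 2];
 *
 *	init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, addr_conv);
 *	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 *
 *	init_async_submit(&submit, ASYNC_TX_FENCE, tx, done_fn, done_param,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, offsets, NDISKS + 2, STRIPE_SIZE,
 *				&submit);
 *
 *	async_tx_issue_pending_all();
 *	async_tx_quiesce(&tx);
 */
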
#endif /* _ASYNC_TX_H_ */