cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

k3-udma-glue.h (5512B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
 */

#ifndef K3_UDMA_GLUE_H_
#define K3_UDMA_GLUE_H_

#include <linux/types.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>

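/**
 * k3_udma_glue_tx_channel_cfg - UDMA TX channel cfg
 *
 * @tx_cfg:		TX ring configuration
 * @txcq_cfg:		TX completion ring configuration
 * @tx_pause_on_err:	pause TX channel on error
 * @tx_filt_einfo:	filter out extended packet info
 * @tx_filt_pswords:	filter out protocol specific words
 * @tx_supr_tdpkt:	suppress teardown packet generation
 * @swdata_size:	SW Data is present in Host PD of @swdata_size bytes
 */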
struct k3_udma_glue_tx_channel_cfg {
	struct k3_ring_cfg tx_cfg;
	struct k3_ring_cfg txcq_cfg;

	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
	u32  swdata_size;
};

struct k3_udma_glue_tx_channel;

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma);
int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync);
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
		void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
struct device *
	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr);
void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr);

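/*
 * Minimal TX usage sketch (illustrative only, not part of the original
 * header). The channel name, the ring setup inside @cfg and the descriptor
 * handling are assumptions made for the example; error handling is omitted.
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { .swdata_size = 16, };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *	struct cppi5_host_desc_t *desc;		(prepared host descriptor)
 *	dma_addr_t desc_dma;			(its DMA address)
 *	int irq;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0-chan", &cfg);
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	k3_udma_glue_enable_tx_chn(tx_chn);
 *
 *	Queue the descriptor for transmission:
 *	k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *
 *	In the completion handler, reclaim finished descriptors:
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		;	(unmap and free the descriptor at desc_dma)
 *
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */
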
enum {
	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
};

/**
 * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
 *
 * @rx_cfg:		RX ring configuration
 * @rxfdq_cfg:		RX free Host PD ring configuration
 * @ring_rxq_id:	RX ring id (or -1 for any)
 * @ring_rxfdq0_id:	RX free Host PD ring (FDQ) id (or -1 for any)
 * @rx_error_handling:	Rx Error Handling Mode (0 - drop, 1 - re-try)
 * @src_tag_lo_sel:	Rx Source Tag Low Byte Selector in Host PD
 */
struct k3_udma_glue_rx_flow_cfg {
	struct k3_ring_cfg rx_cfg;
	struct k3_ring_cfg rxfdq_cfg;
	int ring_rxq_id;
	int ring_rxfdq0_id;
	bool rx_error_handling;
	int src_tag_lo_sel;
};

/**
 * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
 *
 * @swdata_size:	SW Data is present in Host PD of @swdata_size bytes
 * @flow_id_base:	first flow_id used by channel.
 *			If @flow_id_base = -1, a range of GP rflows will be
 *			allocated dynamically.
 * @flow_id_num:	number of RX flows used by channel
 * @flow_id_use_rxchan_id:	use RX channel id as flow id,
 *				used only if @flow_id_num = 1
 * @remote:		indication that the RX channel is remote - some remote
 *			CPU core owns and controls the RX channel. The Linux
 *			host is only allowed to attach and configure RX flows
 *			within the RX channel. If set, no RX channel operations
 *			will be performed by the K3 NAVSS DMA glue interface.
 * @def_flow_cfg:	default RX flow configuration,
 *			used only if @flow_id_num = 1
 */
struct k3_udma_glue_rx_channel_cfg {
	u32  swdata_size;
	int  flow_id_base;
	int  flow_id_num;
	bool flow_id_use_rxchan_id;
	bool remote;

	struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
};

struct k3_udma_glue_rx_channel;

struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
		struct device *dev,
		const char *name,
		struct k3_udma_glue_rx_channel_cfg *cfg);

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync);
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, struct cppi5_host_desc_t *desc_tx,
		dma_addr_t desc_dma);
int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, dma_addr_t *desc_dma);
int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx);
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num);
void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, void *data,
		void (*cleanup)(void *data, dma_addr_t desc_dma),
		bool skip_fdq);
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx);
struct device *
	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr);
void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr);

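/*
 * Minimal RX usage sketch for a single default flow (illustrative only, not
 * part of the original header). The channel name, the @cfg values and the
 * descriptor handling are assumptions made for the example; ring setup and
 * error handling are omitted. With @flow_id_num > 1, individual flows would
 * instead be configured with k3_udma_glue_rx_flow_init().
 *
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { .ring_rxq_id = -1,
 *						     .ring_rxfdq0_id = -1, };
 *	struct k3_udma_glue_rx_channel_cfg cfg = {
 *		.swdata_size = 16,
 *		.flow_id_base = -1,	(allocate GP rflows dynamically)
 *		.flow_id_num = 1,
 *		.def_flow_cfg = &flow_cfg,
 *	};
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *	struct cppi5_host_desc_t *desc;		(prepared free RX descriptor)
 *	dma_addr_t desc_dma;
 *	int irq;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0-chan", &cfg);
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *
 *	Pre-post free descriptors to flow 0, then enable the channel:
 *	k3_udma_glue_push_rx_chn(rx_chn, 0, desc, desc_dma);
 *	k3_udma_glue_enable_rx_chn(rx_chn);
 *
 *	In the flow interrupt handler, pop completed descriptors:
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma))
 *		;	(process the packet at desc_dma, then re-push)
 *
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *	k3_udma_glue_release_rx_chn(rx_chn);
 */
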
#endif /* K3_UDMA_GLUE_H_ */