cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

user.h (11321B)


/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_USER_H
#define IRDMA_USER_H

#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *

#define	IRDMA_MAX_MR_SIZE       0x200000000000ULL

#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f
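
/*
 * Usage sketch (illustrative, not part of the original header): the access
 * flags compose by OR-ing, and the values show that the combined read/write
 * flags already include the matching local right (0x05 = LOCALREAD |
 * REMOTEREAD_ONLY, 0x0a = LOCALWRITE | REMOTEWRITE_ONLY). A privilege set
 * for a remotely writable region might therefore be built as:
 *
 *	irdma_access_privileges access;
 *
 *	access = IRDMA_ACCESS_FLAGS_LOCALREAD | IRDMA_ACCESS_FLAGS_REMOTEWRITE;
 */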

#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
#define IRDMA_OP_TYPE_REC			0x3e
#define IRDMA_OP_TYPE_REC_IMM			0x3f

#define IRDMA_FLUSH_MAJOR_ERR	1

enum irdma_device_caps_const {
	IRDMA_WQE_SIZE =			4,
	IRDMA_CQP_WQE_SIZE =			8,
	IRDMA_CQE_SIZE =			4,
	IRDMA_EXTENDED_CQE_SIZE =		8,
	IRDMA_AEQE_SIZE =			2,
	IRDMA_CEQE_SIZE =			1,
	IRDMA_CQP_CTX_SIZE =			8,
	IRDMA_SHADOW_AREA_SIZE =		8,
	IRDMA_QUERY_FPM_BUF_SIZE =		176,
	IRDMA_COMMIT_FPM_BUF_SIZE =		176,
	IRDMA_GATHER_STATS_BUF_SIZE =		1024,
	IRDMA_MIN_IW_QP_ID =			0,
	IRDMA_MAX_IW_QP_ID =			262143,
	IRDMA_MIN_CEQID =			0,
	IRDMA_MAX_CEQID =			1023,
	IRDMA_CEQ_MAX_COUNT =			IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID =			0,
	IRDMA_MAX_CQID =			524287,
	IRDMA_MIN_AEQ_ENTRIES =			1,
	IRDMA_MAX_AEQ_ENTRIES =			524287,
	IRDMA_MIN_CEQ_ENTRIES =			1,
	IRDMA_MAX_CEQ_ENTRIES =			262143,
	IRDMA_MIN_CQ_SIZE =			1,
	IRDMA_MAX_CQ_SIZE =			1048575,
	IRDMA_DB_ID_ZERO =			0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT =		13,
	IRDMA_MAX_SGE_RD =			13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_INBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_PUSH_PAGE_COUNT =		1024,
	IRDMA_MAX_PE_ENA_VF_COUNT =		32,
	IRDMA_MAX_VF_FPM_ID =			47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE =		2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE =		101,
	IRDMA_MAX_WQ_ENTRIES =			32768,
	IRDMA_Q2_BUF_SIZE =			256,
	IRDMA_QP_CTX_SIZE =			256,
	IRDMA_MAX_PDS =				262144,
};

enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};

enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
};

enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};

enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};

enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM  = 2,
	IRDMA_ROCE	     = 4,
	IRDMA_PUSH_MODE      = 8,
};

struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;
};

struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};

struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};

struct irdma_post_send {
	struct ib_sge *sg_list;
	u32 num_sges;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

struct irdma_post_inline_send {
	void *data;
	u32 len;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

struct irdma_post_rq_info {
	u64 wr_id;
	struct ib_sge *sg_list;
	u32 num_sges;
};

struct irdma_rdma_write {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_inline_rdma_write {
	void *data;
	u32 len;
	struct ib_sge rem_addr;
};

struct irdma_rdma_read {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_bind_window {
	irdma_stag mr_stag;
	u64 bind_len;
	void *va;
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;
	bool ena_writes:1;
	irdma_stag mw_stag;
	bool mem_window_type_1:1;
};

struct irdma_inv_local_stag {
	irdma_stag target_stag;
};

struct irdma_post_sq_info {
	u64 wr_id;
	u8 op_type;
	u8 l4len;
	bool signaled:1;
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;
	bool imm_data_valid:1;
	bool push_wqe:1;
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;
	u32 stag_to_inv;
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
		struct irdma_inline_rdma_write inline_rdma_write;
		struct irdma_post_inline_send inline_send;
	} op;
};
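
/*
 * Usage sketch (illustrative, not part of the original header): building a
 * signaled RDMA write request and handing it to the user-kernel SQ code
 * declared further below. The qp is assumed to have been set up earlier via
 * irdma_uk_qp_init(); the addresses, lengths and STag/lkey values are
 * placeholders, and carrying the remote STag in rem_addr.lkey is an
 * assumption based on rem_addr being a struct ib_sge.
 *
 *	struct ib_sge sge = {
 *		.addr   = local_buf_dma,	// placeholder DMA address
 *		.length = local_buf_len,
 *		.lkey   = local_stag,
 *	};
 *	struct irdma_post_sq_info info = {
 *		.wr_id    = my_cookie,		// echoed back in irdma_cq_poll_info.wr_id
 *		.op_type  = IRDMA_OP_TYPE_RDMA_WRITE,
 *		.signaled = true,
 *	};
 *	int err;
 *
 *	info.op.rdma_write.lo_sg_list  = &sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.addr   = remote_va;
 *	info.op.rdma_write.rem_addr.lkey   = remote_stag;
 *	info.op.rdma_write.rem_addr.length = local_buf_len;
 *
 *	err = irdma_uk_rdma_write(qp, &info, true);	// post_sq=true: post immediately
 */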

struct irdma_cq_poll_info {
	u64 wr_id;
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;
	u32 imm_data;
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;
	u8 ud_smac[6];
	u8 op_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool push_dropped:1;
	bool error:1;
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};

int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
		      bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq);
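
/*
 * Usage sketch (illustrative, not part of the original header): posting a
 * receive buffer. As in the SQ sketch above, the qp, buffer address, length
 * and lkey are placeholders supplied by the caller; the exact error codes of
 * irdma_uk_post_receive() are not spelled out here, so only success/failure
 * is distinguished.
 *
 *	struct ib_sge rsge = {
 *		.addr   = recv_buf_dma,
 *		.length = recv_buf_len,
 *		.lkey   = recv_stag,
 *	};
 *	struct irdma_post_rq_info rq_info = {
 *		.wr_id    = recv_cookie,
 *		.sg_list  = &rsge,
 *		.num_sges = 1,
 *	};
 *
 *	if (irdma_uk_post_receive(qp, &rq_info))
 *		;	// e.g. RQ ring full or too many fragments
 */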

struct irdma_wqe_uk_ops {
	void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
				u8 valid);
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};

int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
		     struct irdma_qp_uk_init_info *info);
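
/*
 * Usage sketch (illustrative, not part of the original header): draining a
 * completion queue and re-arming it. The error codes returned by
 * irdma_uk_cq_poll_cmpl() when the CQ is empty are not defined in this
 * header, so the sketch only distinguishes "got a completion" (return 0)
 * from "stop polling".
 *
 *	struct irdma_cq_poll_info cmpl;
 *
 *	while (!irdma_uk_cq_poll_cmpl(cq, &cmpl)) {
 *		if (cmpl.comp_status != IRDMA_COMPL_STATUS_SUCCESS)
 *			;	// inspect cmpl.major_err / cmpl.minor_err
 *		// complete the request identified by cmpl.wr_id
 *	}
 *
 *	// Ask for an event on the next completion; IRDMA_CQ_COMPL_SOLICITED
 *	// would arm for solicited completions only.
 *	irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);
 */
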
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;
	u32 wr_len;
	u16 quanta;
	u8 reserved[2];
};

struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};

struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;
	struct irdma_qp_quanta *rq_base;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	__le64 *shadow_area;
	__le32 *push_db;
	__le64 *push_wqe;
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	struct irdma_wqe_uk_ops wqe_ops;
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool push_mode:1; /* whether the last post wqe was pushed */
	bool push_dropped:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;
	u8 dbg_rq_flushed;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};

struct irdma_cq_uk {
	struct irdma_cqe *cq_base;
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;
	bool avoid_mem_cflct:1;
};

struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u8 first_sq_wq;
	u8 type;
	int abi_ver;
	bool legacy_mode;
};

struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};

__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */