cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

nvmetcp_common.h (18277B)


      1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
      2/* Copyright 2021 Marvell. All rights reserved. */
      3
      4#ifndef __NVMETCP_COMMON__
      5#define __NVMETCP_COMMON__
      6
      7#include "tcp_common.h"
      8#include <linux/nvme-tcp.h>
      9
/* Layer code identifying NVMeTCP in slow-path events - value is fixed by the
 * firmware HSI (NOTE(review): semantics not visible here, confirm vs FW docs).
 */
#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
/* num_sges value posted on slow-io WQEs; fills the entire 4-bit
 * NVMETCP_WQE_NUM_SGES field of struct nvmetcp_wqe.
 */
#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
     12
/* NVMeTCP firmware function init parameters.
 * Embedded in struct nvmetcp_init_ramrod_params (driver -> FW).
 */
struct nvmetcp_spe_func_init {
	__le16 half_way_close_timeout;	/* TCP half-way-close timeout; units FW-defined - confirm */
	u8 num_sq_pages_in_ring;	/* pages in the send-queue (SQ) ring */
	u8 num_r2tq_pages_in_ring;	/* pages in the R2T-queue ring */
	u8 num_uhq_pages_in_ring;	/* pages in the UHQ ring */
	u8 ll2_rx_queue_id;
	u8 flags;
/* bit 0: enable FW counters */
#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
/* bit 1: function operates in NVMeTCP mode */
#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
/* bits 2-7: reserved */
#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
	u8 debug_flags;
	__le16 reserved1;
	u8 params;
/* bits 0-3: max SYN retransmission count - confirm semantics vs FW docs */
#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK	0xF
#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT	0
/* bits 4-7: reserved */
#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK	0xF
#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT	4
	u8 reserved2[5];
	struct scsi_init_func_params func_params;	/* common storage-FW function params */
	struct scsi_init_func_queues q_params;		/* common storage-FW queue params */
};
     38
/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
struct nvmetcp_init_ramrod_params {
	struct nvmetcp_spe_func_init nvmetcp_init_spe;	/* NVMeTCP-specific part (above) */
	struct tcp_init_params tcp_init;		/* generic TCP init (tcp_common.h) */
};
     44
/* NVMeTCP Ramrod Command IDs (driver -> FW slow-path commands) */
enum nvmetcp_ramrod_cmd_id {
	NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
	NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,		/* init NVMeTCP function */
	NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,		/* tear down the function */
	NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,		/* offload a connection to FW */
	NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,		/* update an offloaded connection */
	NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,	/* terminate a connection */
	NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,		/* clear a connection's SQ */
	MAX_NVMETCP_RAMROD_CMD_ID			/* sentinel - keep last */
};
     56
/* Global-queue table entry: one completion queue's PBL base address */
struct nvmetcp_glbl_queue_entry {
	struct regpair cq_pbl_addr;	/* DMA address of the CQ page-base list */
	struct regpair reserved;
};
     61
/* NVMeTCP conn level EQEs.
 * Values below NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES are ramrod responses
 * and normal async events; values at/above it are error notifications.
 */
enum nvmetcp_eqe_opcode {
	NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
	NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
	NVMETCP_EVENT_TYPE_OFFLOAD_CONN,/* Response after option 2 offload Ramrod */
	NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
	NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
	NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
	NVMETCP_EVENT_TYPE_RESERVED0,
	NVMETCP_EVENT_TYPE_RESERVED1,
	NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
	NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
	NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
	NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
	NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
	MAX_NVMETCP_EQE_OPCODE /* sentinel - keep last */
};
     86
/* NVMe/TCP-specific section of the connection-offload ramrod data */
struct nvmetcp_conn_offload_section {
	struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
	__le16 cccid_max_range; /* CCCID max value - used for validation */
	__le16 reserved[3];
};
     92
/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
struct nvmetcp_conn_offload_params {
	struct regpair sq_pbl_addr;	/* send-queue PBL DMA address */
	struct regpair r2tq_pbl_addr;	/* R2T-queue PBL DMA address */
	struct regpair xhq_pbl_addr;	/* XHQ PBL DMA address */
	struct regpair uhq_pbl_addr;	/* UHQ PBL DMA address */
	__le16 physical_q0;
	__le16 physical_q1;
	u8 flags;
/* bit 0: TCP stack runs on chip */
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
/* bit 1: target (vs initiator) mode */
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
/* bit 2: restricted mode - confirm semantics vs FW docs */
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
/* bit 3: connection runs NVMeTCP (vs other storage protocol) */
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
/* bits 4-7: reserved */
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
	u8 default_cq;		/* default completion-queue index */
	__le16 reserved0;
	__le32 reserved1;
	__le32 initial_ack;

	struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
};
    119
/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
struct nvmetcp_spe_conn_offload {
	__le16 reserved;
	__le16 conn_id;		/* connection id */
	__le32 fw_cid;		/* FW context id */
	struct nvmetcp_conn_offload_params nvmetcp;	/* NVMeTCP part */
	struct tcp_offload_params_opt2 tcp;		/* "option 2" TCP part (tcp_common.h) */
};
    128
/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
struct nvmetcp_conn_update_ramrod_params {
	__le16 reserved0;
	__le16 conn_id;
	__le32 reserved1;
	u8 flags;
/* bit 0: PDU header-digest enable */
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
/* bit 1: PDU data-digest enable */
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
/* bits 2-7: reserved */
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
/* NOTE(review): name below breaks the <FIELD>_MASK/<FIELD>_SHIFT pairing
 * (stray "_DATA"), so token-pasting field accessors (GET_FIELD/SET_FIELD)
 * cannot address RESERVED1. Harmless while the bit stays reserved; kept
 * as-is because renaming an HSI macro could break out-of-tree users.
 */
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
	u8 reserved3[3];
	__le32 max_seq_size;		/* max data sequence size */
	__le32 max_send_pdu_length;	/* max TX PDU length */
	__le32 max_recv_pdu_length;	/* max RX PDU length */
	__le32 first_seq_length;
	__le32 reserved4[5];
};
    158
/* NVMeTCP connection termination request (driver -> FW) */
struct nvmetcp_spe_conn_termination {
	__le16 reserved0;
	__le16 conn_id;
	__le32 reserved1;
	u8 abortive;		/* non-zero: abortive termination - confirm exact semantics */
	u8 reserved2[7];
	struct regpair reserved3;
	struct regpair reserved4;
};
    169
/* Per-task DIF (data-integrity) flags; bit layout is not defined in this header */
struct nvmetcp_dif_flags {
	u8 flags;
};
    173
/* WQE types encoded in the NVMETCP_WQE_WQE_TYPE field of nvmetcp_wqe.flags */
enum nvmetcp_wqe_type {
	NVMETCP_WQE_TYPE_NORMAL,	/* regular I/O request */
	NVMETCP_WQE_TYPE_TASK_CLEANUP,	/* task cleanup request */
	NVMETCP_WQE_TYPE_MIDDLE_PATH,	/* middle-path (slow) request */
	NVMETCP_WQE_TYPE_IC,		/* IC - presumably initialize-connection (ICReq); confirm */
	MAX_NVMETCP_WQE_TYPE		/* sentinel - keep last */
};
    181
/* Send-queue work-queue element posted by the driver */
struct nvmetcp_wqe {
	__le16 task_id;
	u8 flags;
/* bits 0-2: WQE type */
#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
/* bits 3-6: number of SGEs (0xF = NVMETCP_WQE_NUM_SGES_SLOWIO) */
#define NVMETCP_WQE_NUM_SGES_MASK 0xF
#define NVMETCP_WQE_NUM_SGES_SHIFT 3
/* bit 7: response flag */
#define NVMETCP_WQE_RESPONSE_MASK 0x1
#define NVMETCP_WQE_RESPONSE_SHIFT 7
	struct nvmetcp_dif_flags prot_flags;	/* DIF protection flags */
	__le32 contlen_cdbsize;
/* bits 0-23: continuation/data length */
#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
#define NVMETCP_WQE_CONT_LEN_SHIFT 0
/* bits 24-31: CDB size or NVMeTCP command */
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
};
    198
/* One entry of the host CCCID -> iTID lookup table
 * (table base is nvmetcp_conn_offload_section.cccid_itid_table_addr)
 */
struct nvmetcp_host_cccid_itid_entry {
	__le16 itid;
};
    202
/* Results delivered to the driver when connection offload completes */
struct nvmetcp_connect_done_results {
	__le16 icid;		/* internal connection id */
	__le16 conn_id;
	struct tcp_ulp_connect_done_params params;	/* TCP connect results (tcp_common.h) */
};
    208
/* Event-queue element payload for NVMeTCP events */
struct nvmetcp_eqe_data {
	__le16 icid;
	__le16 conn_id;
	__le16 reserved;
	u8 error_code;
	u8 error_pdu_opcode_reserved;
/* bits 0-5: opcode of the PDU that caused the error */
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT  0
/* bit 6: the opcode field above is valid */
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK  0x1
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT  6
/* bit 7: reserved */
#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
};
    222
/* FW task types */
enum nvmetcp_task_type {
	NVMETCP_TASK_TYPE_HOST_WRITE,		/* host-initiated write */
	NVMETCP_TASK_TYPE_HOST_READ,		/* host-initiated read */
	NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,	/* connection-initialization request */
	NVMETCP_TASK_TYPE_RESERVED0,
	NVMETCP_TASK_TYPE_CLEANUP,		/* task cleanup */
	NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,	/* host read without CQE */
	MAX_NVMETCP_TASK_TYPE			/* sentinel - keep last */
};
    232
/* Doorbell data written by the driver when ringing an SQ doorbell */
struct nvmetcp_db_data {
	u8 params;
/* bits 0-1 */
#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
#define NVMETCP_DB_DATA_DEST_SHIFT 0
/* bits 2-3 */
#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
/* bit 4 */
#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
/* bit 5 */
#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
/* bits 6-7 */
#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
	u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */
	__le16 sq_prod;	/* SQ producer index */
};
    248
/* Raw 16-byte NVMe completion-queue entry as delivered by FW (opaque here) */
struct nvmetcp_fw_nvmf_cqe {
	__le32 reserved[4];
};
    252
/* Metadata extracted by FW from the NVMe/TCP ICResp PDU
 * (field names mirror the NVMe/TCP ICResp - confirm against the spec)
 */
struct nvmetcp_icresp_mdata {
	u8  digest;	/* negotiated digest bits (DGST) */
	u8  cpda;	/* controller PDU data alignment */
	__le16  pfv;	/* PDU format version */
	__le32 maxdata;	/* MAXH2CDATA - presumably max host-to-controller data; verify */
	__le16 rsvd[4];
};
    260
/* Per-CQE payload: either a raw NVMe CQE or ICResp metadata */
union nvmetcp_fw_cqe_data {
	struct nvmetcp_fw_nvmf_cqe nvme_cqe;
	struct nvmetcp_icresp_mdata icresp_mdata;
};
    265
/* Completion-queue element written by FW */
struct nvmetcp_fw_cqe {
	__le16 conn_id;
	u8 cqe_type;		/* see enum nvmetcp_fw_cqes_type */
	u8 cqe_error_status_bits;
/* bits 0-2: DIF error bits */
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
/* bit 3: data-digest error */
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
/* bit 4: receive on invalid connection */
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
	__le16 itid;
	u8 task_type;		/* see enum nvmetcp_task_type */
	u8 fw_dbg_field;	/* FW debug info, opaque to the driver */
	u8 caused_conn_err;	/* non-zero if this task caused a connection error */
	u8 reserved0[3];
	__le32 reserved1;
	union nvmetcp_fw_cqe_data cqe_data;	/* NVMe CQE or ICResp metadata */
	struct regpair task_opaque;
	__le32 reserved[6];
};
    286
/* Values reported in nvmetcp_fw_cqe.cqe_type */
enum nvmetcp_fw_cqes_type {
	NVMETCP_FW_CQE_TYPE_NORMAL = 1,	/* regular completion */
	NVMETCP_FW_CQE_TYPE_RESERVED0,
	NVMETCP_FW_CQE_TYPE_RESERVED1,
	NVMETCP_FW_CQE_TYPE_CLEANUP,	/* task-cleanup completion */
	NVMETCP_FW_CQE_TYPE_DUMMY,	/* dummy completion */
	MAX_NVMETCP_FW_CQES_TYPE	/* sentinel - keep last */
};
    295
/* Ystorm storm-engine per-task state */
struct ystorm_nvmetcp_task_state {
	struct scsi_cached_sges data_desc;
	struct scsi_sgl_params sgl_params;
	__le32 resrved0;	/* NOTE(review): typo for "reserved0"; kept as-is - it is the HSI field name */
	__le32 buffer_offset;
	__le16 cccid;		/* command capsule CID */
	struct nvmetcp_dif_flags dif_flags;
	u8 flags;
/* bit 0: local completion */
#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
/* bit 1: slow-io path */
#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
/* bit 2: set DIF offset */
#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
/* bit 3: send write response */
#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
};
    313
/* Ystorm per-task retransmit options - opaque to the driver (all reserved) */
struct ystorm_nvmetcp_task_rxmit_opt {
	__le32 reserved[4];
};
    317
/* Raw PDU header image handed to FW: 18 dwords (72 bytes) */
struct nvmetcp_task_hdr {
	__le32 reg[18];
};
    321
/* PDU header padded by two dwords (72 -> 80 bytes) for QREG alignment */
struct nvmetcp_task_hdr_aligned {
	struct nvmetcp_task_hdr task_hdr;
	__le32 reserved[2];	/* HSI_COMMENT: Align to QREG */
};
    326
/* T-DIF task context placeholder (64 bytes), opaque to the driver */
struct e5_tdif_task_context {
	__le32 reserved[16];
};
    330
/* R-DIF task context placeholder (48 bytes), opaque to the driver */
struct e5_rdif_task_context {
	__le32 reserved[12];
};
    334
/* Ystorm storm-engine per-task static context */
struct ystorm_nvmetcp_task_st_ctx {
	struct ystorm_nvmetcp_task_state state;
	struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
	struct nvmetcp_task_hdr_aligned pdu_hdr;	/* PDU header for this task */
};
    340
/* Mstorm storm-engine per-task static context */
struct mstorm_nvmetcp_task_st_ctx {
	struct scsi_cached_sges data_desc;
	struct scsi_sgl_params sgl_params;
	__le32 rem_task_size;		/* remaining task bytes */
	__le32 data_buffer_offset;
	u8 task_type;			/* see enum nvmetcp_task_type */
	struct nvmetcp_dif_flags dif_flags;
	__le16 dif_task_icid;
	struct regpair reserved0;
	__le32 expected_itt;
	__le32 reserved1;
};
    353
/* Ustorm storm-engine per-task static context */
struct ustorm_nvmetcp_task_st_ctx {
	__le32 rem_rcv_len;		/* remaining bytes to receive */
	__le32 exp_data_transfer_len;	/* expected total data-transfer length */
	__le32 exp_data_sn;		/* expected data sequence number */
	struct regpair reserved0;
	__le32 reg1_map;
/* bits 0-3: number of SGEs */
#define REG1_NUM_SGES_MASK 0xF
#define REG1_NUM_SGES_SHIFT 0
/* bits 4-31: reserved */
#define REG1_RESERVED1_MASK 0xFFFFFFF
#define REG1_RESERVED1_SHIFT 4
	u8 flags2;
/* bit 0: AHS present */
#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
/* bits 1-7: reserved */
#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
	struct nvmetcp_dif_flags dif_flags;
	__le16 reserved3;
	__le16 tqe_opaque[2];
	__le32 reserved5;
	__le32 nvme_tcp_opaque_lo;
	__le32 nvme_tcp_opaque_hi;
	u8 task_type;			/* see enum nvmetcp_task_type */
	u8 error_flags;
/* bit 0: data-digest error */
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
/* bit 1: data-truncated error */
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
/* bit 2: under-run error */
#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
/* bit 3: NVMeTCP task */
#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
	u8 flags;
/* bits 0-1: CQE write mode */
#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
/* bit 2: local completion */
#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
/* bit 3 */
#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
/* bit 4 */
#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
/* bit 5 */
#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
/* bit 6 */
#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
	u8 cq_rss_number;	/* CQ index for this task's completion */
};
    400
/* Ystorm aggregative per-task context. Generic field names (byteN/wordN/
 * flagsN) mirror the FW HSI; the inline comments give the FW meaning where
 * known. The flags bytes are FW-internal - no bit layout is defined here.
 */
struct e5_ystorm_nvmetcp_task_ag_ctx {
	u8 reserved /* cdu_validation */;
	u8 byte1 /* state_and_core_id */;
	__le16 word0 /* icid */;
	u8 flags0;
	u8 flags1;
	u8 flags2;
	u8 flags3;
	__le32 TTT;
	u8 byte2;
	u8 byte3;
	u8 byte4;
	u8 reserved7;
};
    415
/* Mstorm aggregative per-task context */
struct e5_mstorm_nvmetcp_task_ag_ctx {
	u8 cdu_validation;
	u8 byte1;
	__le16 task_cid;
	u8 flags0;
/* bits 0-3: connection type */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
/* bit 4 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
/* bit 5 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
/* bit 6 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
/* bit 7 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
	u8 flags1;
/* bits 0-1: task-cleanup completion-flag (CF) state */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
/* bits 2-3 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
/* bits 4-5 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
/* bit 6: enable for TASK_CLEANUP_CF */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
/* bit 7: enable for CF1 */
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
	u8 flags2;
	u8 flags3;
	__le32 reg0;
	u8 byte2;
	u8 byte3;
	u8 byte4;
	u8 reserved7;
};
    450
/* Ustorm aggregative per-task context */
struct e5_ustorm_nvmetcp_task_ag_ctx {
	u8 reserved;
	u8 state_and_core_id;
	__le16 icid;
	u8 flags0;
/* bits 0-3: connection type */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
/* bit 4 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
/* bit 5 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
/* bits 6-7: HQ-scanned completion-flag (CF) state */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
	u8 flags1;
/* bits 0-1: reserved */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
/* bits 2-3: R2T-to-receive CF state */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
/* bits 4-5 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
/* bits 6-7: DIF-error CF state */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
	u8 flags2;
/* bit 0: enable for HQ_SCANNED_CF */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
/* bit 1 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
/* bit 2: enable for R2T2RECV */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
/* bit 3: enable for CF3 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
/* bit 4: enable for DIF_ERROR_CF */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
/* bit 5 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
/* bit 6 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
/* bit 7 */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
	u8 flags3;
	u8 flags4;
/* bits 0-1: reserved */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
/* bit 2: reserved */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
/* bit 3: reserved */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
/* bits 4-7: DIF error type */
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
	u8 byte2;
	u8 byte3;
	u8 reserved8;
	__le32 dif_err_intervals;	/* bitmap/count of DIF-error intervals - confirm */
	__le32 dif_error_1st_interval;
	__le32 rcv_cont_len;		/* received continuation length */
	__le32 exp_cont_len;		/* expected continuation length */
	__le32 total_data_acked;
	__le32 exp_data_acked;
	__le16 word1;
	__le16 next_tid;
	__le32 hdr_residual_count;
	__le32 exp_r2t_sn;		/* expected R2T sequence number */
};
    514
/* Complete E5 per-task HW context image, in the exact order the FW expects:
 * storm static (st) and aggregative (ag) contexts interleaved with regpair
 * padding that keeps each section aligned. Do not reorder or resize fields.
 */
struct e5_nvmetcp_task_context {
	struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
	struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
	struct regpair ystorm_ag_padding[2];
	struct e5_tdif_task_context tdif_context;
	struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
	struct regpair mstorm_ag_padding[2];
	struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
	struct regpair ustorm_ag_padding[2];
	struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
	struct regpair mstorm_st_padding[2];
	struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
	struct regpair ustorm_st_padding[2];
	struct e5_rdif_task_context rdif_context;
};
    530
    531#endif /* __NVMETCP_COMMON__*/