cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qedi_fw.c (66453B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * QLogic iSCSI Offload Driver
      4 * Copyright (c) 2016 Cavium Inc.
      5 */
      6
      7#include <linux/blkdev.h>
      8#include <scsi/scsi_tcq.h>
      9#include <linux/delay.h>
     10
     11#include "qedi.h"
     12#include "qedi_iscsi.h"
     13#include "qedi_gbl.h"
     14#include "qedi_fw_iscsi.h"
     15#include "qedi_fw_scsi.h"
     16
     17static int send_iscsi_tmf(struct qedi_conn *qedi_conn,
     18			  struct iscsi_task *mtask, struct iscsi_task *ctask);
     19
     20void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
     21{
     22	struct scsi_cmnd *sc = cmd->scsi_cmd;
     23
     24	if (cmd->io_tbl.sge_valid && sc) {
     25		cmd->io_tbl.sge_valid = 0;
     26		scsi_dma_unmap(sc);
     27	}
     28}
     29
     30static void qedi_process_logout_resp(struct qedi_ctx *qedi,
     31				     union iscsi_cqe *cqe,
     32				     struct iscsi_task *task,
     33				     struct qedi_conn *qedi_conn)
     34{
     35	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
     36	struct iscsi_logout_rsp *resp_hdr;
     37	struct iscsi_session *session = conn->session;
     38	struct iscsi_logout_response_hdr *cqe_logout_response;
     39	struct qedi_cmd *cmd;
     40
     41	cmd = (struct qedi_cmd *)task->dd_data;
     42	cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
     43	spin_lock(&session->back_lock);
     44	resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
     45	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
     46	resp_hdr->opcode = cqe_logout_response->opcode;
     47	resp_hdr->flags = cqe_logout_response->flags;
     48	resp_hdr->hlength = 0;
     49
     50	resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
     51	resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
     52	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
     53	resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
     54
     55	resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
     56	resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
     57
     58	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
     59		  "Freeing tid=0x%x for cid=0x%x\n",
     60		  cmd->task_id, qedi_conn->iscsi_conn_id);
     61
     62	spin_lock(&qedi_conn->list_lock);
     63	if (likely(cmd->io_cmd_in_list)) {
     64		cmd->io_cmd_in_list = false;
     65		list_del_init(&cmd->io_cmd);
     66		qedi_conn->active_cmd_count--;
     67	} else {
     68		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
     69			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
     70			  cmd->task_id, qedi_conn->iscsi_conn_id,
     71			  &cmd->io_cmd);
     72	}
     73	spin_unlock(&qedi_conn->list_lock);
     74
     75	cmd->state = RESPONSE_RECEIVED;
     76	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
     77
     78	spin_unlock(&session->back_lock);
     79}
     80
     81static void qedi_process_text_resp(struct qedi_ctx *qedi,
     82				   union iscsi_cqe *cqe,
     83				   struct iscsi_task *task,
     84				   struct qedi_conn *qedi_conn)
     85{
     86	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
     87	struct iscsi_session *session = conn->session;
     88	struct iscsi_task_context *task_ctx;
     89	struct iscsi_text_rsp *resp_hdr_ptr;
     90	struct iscsi_text_response_hdr *cqe_text_response;
     91	struct qedi_cmd *cmd;
     92	int pld_len;
     93
     94	cmd = (struct qedi_cmd *)task->dd_data;
     95	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
     96
     97	cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
     98	spin_lock(&session->back_lock);
     99	resp_hdr_ptr =  (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
    100	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
    101	resp_hdr_ptr->opcode = cqe_text_response->opcode;
    102	resp_hdr_ptr->flags = cqe_text_response->flags;
    103	resp_hdr_ptr->hlength = 0;
    104
    105	hton24(resp_hdr_ptr->dlength,
    106	       (cqe_text_response->hdr_second_dword &
    107		ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
    108
    109	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
    110				      conn->session->age);
    111	resp_hdr_ptr->ttt = cqe_text_response->ttt;
    112	resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
    113	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
    114	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
    115
    116	pld_len = cqe_text_response->hdr_second_dword &
    117		  ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
    118	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
    119
    120	memset(task_ctx, '\0', sizeof(*task_ctx));
    121
    122	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
    123		  "Freeing tid=0x%x for cid=0x%x\n",
    124		  cmd->task_id, qedi_conn->iscsi_conn_id);
    125
    126	spin_lock(&qedi_conn->list_lock);
    127	if (likely(cmd->io_cmd_in_list)) {
    128		cmd->io_cmd_in_list = false;
    129		list_del_init(&cmd->io_cmd);
    130		qedi_conn->active_cmd_count--;
    131	} else {
    132		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
    133			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
    134			  cmd->task_id, qedi_conn->iscsi_conn_id,
    135			  &cmd->io_cmd);
    136	}
    137	spin_unlock(&qedi_conn->list_lock);
    138
    139	cmd->state = RESPONSE_RECEIVED;
    140
    141	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
    142			     qedi_conn->gen_pdu.resp_buf,
    143			     (qedi_conn->gen_pdu.resp_wr_ptr -
    144			      qedi_conn->gen_pdu.resp_buf));
    145	spin_unlock(&session->back_lock);
    146}
    147
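/*
 * Deferred TMF response handling: flush the connection's outstanding I/O
 * via qedi_cleanup_all_io() and, on success, complete the saved TMF
 * response PDU to libiscsi; in all cases drop the fw_cleanup_works count
 * taken when the work was queued.
 */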
    148static void qedi_tmf_resp_work(struct work_struct *work)
    149{
    150	struct qedi_cmd *qedi_cmd =
    151				container_of(work, struct qedi_cmd, tmf_work);
    152	struct qedi_conn *qedi_conn = qedi_cmd->conn;
    153	struct qedi_ctx *qedi = qedi_conn->qedi;
    154	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    155	struct iscsi_session *session = conn->session;
    156	struct iscsi_tm_rsp *resp_hdr_ptr;
    157	int rval = 0;
    158
    159	resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
    160
    161	rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
    162	if (rval)
    163		goto exit_tmf_resp;
    164
    165	spin_lock(&session->back_lock);
    166	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
    167	spin_unlock(&session->back_lock);
    168
    169exit_tmf_resp:
    170	kfree(resp_hdr_ptr);
    171
    172	spin_lock(&qedi_conn->tmf_work_lock);
    173	qedi_conn->fw_cleanup_works--;
    174	spin_unlock(&qedi_conn->tmf_work_lock);
    175}
    176
    177static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
    178				  union iscsi_cqe *cqe,
    179				  struct iscsi_task *task,
    180				  struct qedi_conn *qedi_conn)
    181
    182{
    183	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    184	struct iscsi_session *session = conn->session;
    185	struct iscsi_tmf_response_hdr *cqe_tmp_response;
    186	struct iscsi_tm_rsp *resp_hdr_ptr;
    187	struct iscsi_tm *tmf_hdr;
    188	struct qedi_cmd *qedi_cmd = NULL;
    189
    190	cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
    191
    192	qedi_cmd = task->dd_data;
    193	qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
    194	if (!qedi_cmd->tmf_resp_buf) {
    195		QEDI_ERR(&qedi->dbg_ctx,
    196			 "Failed to allocate resp buf, cid=0x%x\n",
    197			  qedi_conn->iscsi_conn_id);
    198		return;
    199	}
    200
    201	spin_lock(&session->back_lock);
    202	resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
    203	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
    204
    205	/* Fill up the header */
    206	resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
    207	resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
    208	resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
    209	resp_hdr_ptr->hlength = 0;
    210
    211	hton24(resp_hdr_ptr->dlength,
    212	       (cqe_tmp_response->hdr_second_dword &
    213		ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
    214	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
    215				      conn->session->age);
    216	resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
    217	resp_hdr_ptr->exp_cmdsn  = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
    218	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
    219
    220	tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
    221
    222	spin_lock(&qedi_conn->list_lock);
    223	if (likely(qedi_cmd->io_cmd_in_list)) {
    224		qedi_cmd->io_cmd_in_list = false;
    225		list_del_init(&qedi_cmd->io_cmd);
    226		qedi_conn->active_cmd_count--;
    227	}
    228	spin_unlock(&qedi_conn->list_lock);
    229
    230	spin_lock(&qedi_conn->tmf_work_lock);
    231	switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
    232	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
    233	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
    234	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
    235		if (qedi_conn->ep_disconnect_starting) {
    236			/* Session is down so ep_disconnect will clean up */
    237			spin_unlock(&qedi_conn->tmf_work_lock);
    238			goto unblock_sess;
    239		}
    240
    241		qedi_conn->fw_cleanup_works++;
    242		spin_unlock(&qedi_conn->tmf_work_lock);
    243
    244		INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
    245		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
    246		goto unblock_sess;
    247	}
    248	spin_unlock(&qedi_conn->tmf_work_lock);
    249
    250	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
    251	kfree(resp_hdr_ptr);
    252
    253unblock_sess:
    254	spin_unlock(&session->back_lock);
    255}
    256
    257static void qedi_process_login_resp(struct qedi_ctx *qedi,
    258				    union iscsi_cqe *cqe,
    259				    struct iscsi_task *task,
    260				    struct qedi_conn *qedi_conn)
    261{
    262	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    263	struct iscsi_session *session = conn->session;
    264	struct iscsi_task_context *task_ctx;
    265	struct iscsi_login_rsp *resp_hdr_ptr;
    266	struct iscsi_login_response_hdr *cqe_login_response;
    267	struct qedi_cmd *cmd;
    268	int pld_len;
    269
    270	cmd = (struct qedi_cmd *)task->dd_data;
    271
    272	cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
    273	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
    274
    275	spin_lock(&session->back_lock);
    276	resp_hdr_ptr =  (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
    277	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
    278	resp_hdr_ptr->opcode = cqe_login_response->opcode;
    279	resp_hdr_ptr->flags = cqe_login_response->flags_attr;
    280	resp_hdr_ptr->hlength = 0;
    281
    282	hton24(resp_hdr_ptr->dlength,
    283	       (cqe_login_response->hdr_second_dword &
    284		ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
    285	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
    286				      conn->session->age);
    287	resp_hdr_ptr->tsih = cqe_login_response->tsih;
    288	resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
    289	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
    290	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
    291	resp_hdr_ptr->status_class = cqe_login_response->status_class;
    292	resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
    293	pld_len = cqe_login_response->hdr_second_dword &
    294		  ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
    295	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
    296
    297	spin_lock(&qedi_conn->list_lock);
    298	if (likely(cmd->io_cmd_in_list)) {
    299		cmd->io_cmd_in_list = false;
    300		list_del_init(&cmd->io_cmd);
    301		qedi_conn->active_cmd_count--;
    302	}
    303	spin_unlock(&qedi_conn->list_lock);
    304
    305	memset(task_ctx, '\0', sizeof(*task_ctx));
    306
    307	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
    308			     qedi_conn->gen_pdu.resp_buf,
    309			     (qedi_conn->gen_pdu.resp_wr_ptr -
    310			     qedi_conn->gen_pdu.resp_buf));
    311
    312	spin_unlock(&session->back_lock);
    313	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
    314		  "Freeing tid=0x%x for cid=0x%x\n",
    315		  cmd->task_id, qedi_conn->iscsi_conn_id);
    316	cmd->state = RESPONSE_RECEIVED;
    317}
    318
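/*
 * Copy the payload of an unsolicited PDU out of the BDQ buffer selected by
 * cqe->rqe_opaque. Only SINGLE/FIRST CQEs carry data to copy; the index is
 * bounds-checked against QEDI_BDQ_NUM before use.
 */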
    319static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
    320				struct iscsi_cqe_unsolicited *cqe,
    321				char *ptr, int len)
    322{
    323	u16 idx = 0;
    324
    325	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    326		  "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
    327		  len, qedi->bdq_prod_idx,
    328		  (qedi->bdq_prod_idx % qedi->rq_num_entries));
    329
    330	/* Obtain buffer address from rqe_opaque */
    331	idx = cqe->rqe_opaque;
    332	if (idx > (QEDI_BDQ_NUM - 1)) {
    333		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    334			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
    335			  idx);
    336		return;
    337	}
    338
    339	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    340		  "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
    341
    342	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    343		  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
    344	switch (cqe->unsol_cqe_type) {
    345	case ISCSI_CQE_UNSOLICITED_SINGLE:
    346	case ISCSI_CQE_UNSOLICITED_FIRST:
    347		if (len)
    348			memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
    349		break;
    350	case ISCSI_CQE_UNSOLICITED_MIDDLE:
    351	case ISCSI_CQE_UNSOLICITED_LAST:
    352		break;
    353	default:
    354		break;
    355	}
    356}
    357
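/*
 * Return the consumed BDQ buffer to the firmware: repost its DMA address in
 * the BDQ PBL at the current producer slot, advance the producer index by
 * 'count' and write the new value to both BDQ producer registers.
 */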
    358static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
    359				struct iscsi_cqe_unsolicited *cqe,
    360				int count)
    361{
    362	u16 idx = 0;
    363	struct scsi_bd *pbl;
    364
    365	/* Obtain buffer address from rqe_opaque */
    366	idx = cqe->rqe_opaque;
    367	if (idx > (QEDI_BDQ_NUM - 1)) {
    368		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    369			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
    370			  idx);
    371		return;
    372	}
    373
    374	pbl = (struct scsi_bd *)qedi->bdq_pbl;
    375	pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
    376	pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
    377	pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
    378	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    379		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
    380		  pbl, pbl->address.hi, pbl->address.lo, idx);
    381	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
    382	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
    383	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
    384	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
    385
    386	/* Increment producer to let f/w know we've handled the frame */
    387	qedi->bdq_prod_idx += count;
    388
    389	writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
    390	readw(qedi->bdq_primary_prod);
    391
    392	writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
    393	readw(qedi->bdq_secondary_prod);
    394}
    395
    396static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
    397				      struct iscsi_cqe_unsolicited *cqe,
    398				      u32 pdu_len, u32 num_bdqs,
    399				      char *bdq_data)
    400{
    401	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    402		  "num_bdqs [%d]\n", num_bdqs);
    403
    404	qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
    405	qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
    406}
    407
    408static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
    409				   union iscsi_cqe *cqe,
    410				   struct iscsi_task *task,
    411				   struct qedi_conn *qedi_conn, u16 que_idx)
    412{
    413	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    414	struct iscsi_session *session = conn->session;
    415	struct iscsi_nop_in_hdr *cqe_nop_in;
    416	struct iscsi_nopin *hdr;
    417	struct qedi_cmd *cmd;
    418	int tgt_async_nop = 0;
    419	u32 lun[2];
    420	u32 pdu_len, num_bdqs;
    421	char bdq_data[QEDI_BDQ_BUF_SIZE];
    422	unsigned long flags;
    423
    424	spin_lock_bh(&session->back_lock);
    425	cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
    426
    427	pdu_len = cqe_nop_in->hdr_second_dword &
    428		  ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
    429	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
    430
    431	hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
    432	memset(hdr, 0, sizeof(struct iscsi_hdr));
    433	hdr->opcode = cqe_nop_in->opcode;
    434	hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
    435	hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
    436	hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
    437	hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
    438
    439	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
    440		spin_lock_irqsave(&qedi->hba_lock, flags);
    441		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
    442					  pdu_len, num_bdqs, bdq_data);
    443		hdr->itt = RESERVED_ITT;
    444		tgt_async_nop = 1;
    445		spin_unlock_irqrestore(&qedi->hba_lock, flags);
    446		goto done;
    447	}
    448
    449	/* Response to one of our nop-outs */
    450	if (task) {
    451		cmd = task->dd_data;
    452		hdr->flags = ISCSI_FLAG_CMD_FINAL;
    453		hdr->itt = build_itt(cqe->cqe_solicited.itid,
    454				     conn->session->age);
    455		lun[0] = 0xffffffff;
    456		lun[1] = 0xffffffff;
    457		memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
    458		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
    459			  "Freeing tid=0x%x for cid=0x%x\n",
    460			  cmd->task_id, qedi_conn->iscsi_conn_id);
    461		cmd->state = RESPONSE_RECEIVED;
    462		spin_lock(&qedi_conn->list_lock);
    463		if (likely(cmd->io_cmd_in_list)) {
    464			cmd->io_cmd_in_list = false;
    465			list_del_init(&cmd->io_cmd);
    466			qedi_conn->active_cmd_count--;
    467		}
    468
    469		spin_unlock(&qedi_conn->list_lock);
    470	}
    471
    472done:
    473	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
    474
    475	spin_unlock_bh(&session->back_lock);
    476	return tgt_async_nop;
    477}
    478
    479static void qedi_process_async_mesg(struct qedi_ctx *qedi,
    480				    union iscsi_cqe *cqe,
    481				    struct iscsi_task *task,
    482				    struct qedi_conn *qedi_conn,
    483				    u16 que_idx)
    484{
    485	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    486	struct iscsi_session *session = conn->session;
    487	struct iscsi_async_msg_hdr *cqe_async_msg;
    488	struct iscsi_async *resp_hdr;
    489	u32 lun[2];
    490	u32 pdu_len, num_bdqs;
    491	char bdq_data[QEDI_BDQ_BUF_SIZE];
    492	unsigned long flags;
    493
    494	spin_lock_bh(&session->back_lock);
    495
    496	cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
    497	pdu_len = cqe_async_msg->hdr_second_dword &
    498		ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
    499	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
    500
    501	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
    502		spin_lock_irqsave(&qedi->hba_lock, flags);
    503		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
    504					  pdu_len, num_bdqs, bdq_data);
    505		spin_unlock_irqrestore(&qedi->hba_lock, flags);
    506	}
    507
    508	resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
    509	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
    510	resp_hdr->opcode = cqe_async_msg->opcode;
    511	resp_hdr->flags = 0x80;
    512
    513	lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
    514	lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
    515	memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
    516	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
    517	resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
    518	resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
    519
    520	resp_hdr->async_event = cqe_async_msg->async_event;
    521	resp_hdr->async_vcode = cqe_async_msg->async_vcode;
    522
    523	resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
    524	resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
    525	resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
    526
    527	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
    528			     pdu_len);
    529
    530	spin_unlock_bh(&session->back_lock);
    531}
    532
    533static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
    534				     union iscsi_cqe *cqe,
    535				     struct iscsi_task *task,
    536				     struct qedi_conn *qedi_conn,
    537				     uint16_t que_idx)
    538{
    539	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    540	struct iscsi_session *session = conn->session;
    541	struct iscsi_reject_hdr *cqe_reject;
    542	struct iscsi_reject *hdr;
    543	u32 pld_len, num_bdqs;
    544	unsigned long flags;
    545
    546	spin_lock_bh(&session->back_lock);
    547	cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
    548	pld_len = cqe_reject->hdr_second_dword &
    549		  ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
    550	num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
    551
    552	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
    553		spin_lock_irqsave(&qedi->hba_lock, flags);
    554		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
    555					  pld_len, num_bdqs, conn->data);
    556		spin_unlock_irqrestore(&qedi->hba_lock, flags);
    557	}
    558	hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
    559	memset(hdr, 0, sizeof(struct iscsi_hdr));
    560	hdr->opcode = cqe_reject->opcode;
    561	hdr->reason = cqe_reject->hdr_reason;
    562	hdr->flags = cqe_reject->hdr_flags;
    563	hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
    564			      ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
    565	hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
    566	hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
    567	hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
    568	hdr->ffffffff = cpu_to_be32(0xffffffff);
    569
    570	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
    571			     conn->data, pld_len);
    572	spin_unlock_bh(&session->back_lock);
    573}
    574
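/*
 * Complete a SCSI command: unmap its SG list, rebuild the iSCSI SCSI
 * response header from the data-in CQE (copying sense data on CHECK
 * CONDITION and fixing up the residual when the firmware reports an
 * underrun), remove the command from the active list and hand the PDU
 * to libiscsi.
 */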
    575static void qedi_scsi_completion(struct qedi_ctx *qedi,
    576				 union iscsi_cqe *cqe,
    577				 struct iscsi_task *task,
    578				 struct iscsi_conn *conn)
    579{
    580	struct scsi_cmnd *sc_cmd;
    581	struct qedi_cmd *cmd = task->dd_data;
    582	struct iscsi_session *session = conn->session;
    583	struct iscsi_scsi_rsp *hdr;
    584	struct iscsi_data_in_hdr *cqe_data_in;
    585	int datalen = 0;
    586	struct qedi_conn *qedi_conn;
    587	u32 iscsi_cid;
    588	u8 cqe_err_bits = 0;
    589
    590	iscsi_cid  = cqe->cqe_common.conn_id;
    591	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
    592
    593	cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
    594	cqe_err_bits =
    595		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
    596
    597	spin_lock_bh(&session->back_lock);
    598	/* get the scsi command */
    599	sc_cmd = cmd->scsi_cmd;
    600
    601	if (!sc_cmd) {
    602		QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
    603		goto error;
    604	}
    605
    606	if (!iscsi_cmd(sc_cmd)->task) {
    607		QEDI_WARN(&qedi->dbg_ctx,
    608			  "NULL task pointer, returned in another context.\n");
    609		goto error;
    610	}
    611
    612	if (!scsi_cmd_to_rq(sc_cmd)->q) {
    613		QEDI_WARN(&qedi->dbg_ctx,
    614			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
    615			  sc_cmd);
    616		goto error;
    617	}
    618
    619	qedi_iscsi_unmap_sg_list(cmd);
    620
    621	hdr = (struct iscsi_scsi_rsp *)task->hdr;
    622	hdr->opcode = cqe_data_in->opcode;
    623	hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
    624	hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
    625	hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
    626	hdr->response = cqe_data_in->reserved1;
    627	hdr->cmd_status = cqe_data_in->status_rsvd;
    628	hdr->flags = cqe_data_in->flags;
    629	hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
    630
    631	if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
    632		datalen = cqe_data_in->reserved2 &
    633			  ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
    634		memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
    635	}
    636
    637	/* If f/w reports data underrun err then set residual to IO transfer
    638	 * length, set Underrun flag and clear Overrun flag explicitly
    639	 */
    640	if (unlikely(cqe_err_bits &&
    641		     GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
    642		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
    643			  "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
    644			  hdr->itt, cqe_data_in->flags, cmd->task_id,
    645			  qedi_conn->iscsi_conn_id, hdr->residual_count,
    646			  scsi_bufflen(sc_cmd));
    647		hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
    648		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
    649		hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
    650	}
    651
    652	spin_lock(&qedi_conn->list_lock);
    653	if (likely(cmd->io_cmd_in_list)) {
    654		cmd->io_cmd_in_list = false;
    655		list_del_init(&cmd->io_cmd);
    656		qedi_conn->active_cmd_count--;
    657	}
    658	spin_unlock(&qedi_conn->list_lock);
    659
    660	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
    661		  "Freeing tid=0x%x for cid=0x%x\n",
    662		  cmd->task_id, qedi_conn->iscsi_conn_id);
    663	cmd->state = RESPONSE_RECEIVED;
    664	if (qedi_io_tracing)
    665		qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
    666
    667	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
    668			     conn->data, datalen);
    669error:
    670	spin_unlock_bh(&session->back_lock);
    671}
    672
    673static void qedi_mtask_completion(struct qedi_ctx *qedi,
    674				  union iscsi_cqe *cqe,
    675				  struct iscsi_task *task,
    676				  struct qedi_conn *conn, uint16_t que_idx)
    677{
    678	struct iscsi_conn *iscsi_conn;
    679	u32 hdr_opcode;
    680
    681	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
    682	iscsi_conn = conn->cls_conn->dd_data;
    683
    684	switch (hdr_opcode) {
    685	case ISCSI_OPCODE_SCSI_RESPONSE:
    686	case ISCSI_OPCODE_DATA_IN:
    687		qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
    688		break;
    689	case ISCSI_OPCODE_LOGIN_RESPONSE:
    690		qedi_process_login_resp(qedi, cqe, task, conn);
    691		break;
    692	case ISCSI_OPCODE_TMF_RESPONSE:
    693		qedi_process_tmf_resp(qedi, cqe, task, conn);
    694		break;
    695	case ISCSI_OPCODE_TEXT_RESPONSE:
    696		qedi_process_text_resp(qedi, cqe, task, conn);
    697		break;
    698	case ISCSI_OPCODE_LOGOUT_RESPONSE:
    699		qedi_process_logout_resp(qedi, cqe, task, conn);
    700		break;
    701	case ISCSI_OPCODE_NOP_IN:
    702		qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
    703		break;
    704	default:
    705		QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
    706	}
    707}
    708
    709static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
    710					  struct iscsi_cqe_solicited *cqe,
    711					  struct iscsi_task *task,
    712					  struct qedi_conn *qedi_conn)
    713{
    714	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
    715	struct iscsi_session *session = conn->session;
    716	struct qedi_cmd *cmd = task->dd_data;
    717
    718	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
    719		  "itid=0x%x, cmd task id=0x%x\n",
    720		  cqe->itid, cmd->task_id);
    721
    722	cmd->state = RESPONSE_RECEIVED;
    723
    724	spin_lock_bh(&session->back_lock);
    725	__iscsi_put_task(task);
    726	spin_unlock_bh(&session->back_lock);
    727}
    728
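/*
 * Handle a TASK_CLEANUP completion. If the completed itid matches a queued
 * TMF work entry, detach that entry, pull the aborted command off the
 * active list and wake the waiter; otherwise count the completion toward
 * an outstanding bulk cleanup request (see qedi_cleanup_all_io()).
 */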
    729static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
    730					  struct iscsi_cqe_solicited *cqe,
    731					  struct iscsi_conn *conn)
    732{
    733	struct qedi_work_map *work, *work_tmp;
    734	u32 proto_itt = cqe->itid;
    735	int found = 0;
    736	struct qedi_cmd *qedi_cmd = NULL;
    737	u32 iscsi_cid;
    738	struct qedi_conn *qedi_conn;
    739	struct qedi_cmd *dbg_cmd;
    740	struct iscsi_task *mtask, *task;
    741	struct iscsi_tm *tmf_hdr = NULL;
    742
    743	iscsi_cid = cqe->conn_id;
    744	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
    745	if (!qedi_conn) {
    746		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
    747			  "icid not found 0x%x\n", cqe->conn_id);
    748		return;
    749	}
    750
    751	/* Based on this itt get the corresponding qedi_cmd */
    752	spin_lock_bh(&qedi_conn->tmf_work_lock);
    753	list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
    754				 list) {
    755		if (work->rtid == proto_itt) {
    756			/* We found the command */
    757			qedi_cmd = work->qedi_cmd;
    758			if (!qedi_cmd->list_tmf_work) {
    759				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
    760					  "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
    761					  proto_itt, qedi_conn->iscsi_conn_id);
    762				WARN_ON(1);
    763			}
    764			found = 1;
    765			mtask = qedi_cmd->task;
    766			task = work->ctask;
    767			tmf_hdr = (struct iscsi_tm *)mtask->hdr;
    768
    769			list_del_init(&work->list);
    770			kfree(work);
    771			qedi_cmd->list_tmf_work = NULL;
    772		}
    773	}
    774	spin_unlock_bh(&qedi_conn->tmf_work_lock);
    775
    776	if (!found)
    777		goto check_cleanup_reqs;
    778
    779	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
    780		  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
    781		  proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
    782
    783	spin_lock_bh(&conn->session->back_lock);
    784	if (iscsi_task_is_completed(task)) {
    785		QEDI_NOTICE(&qedi->dbg_ctx,
    786			    "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
    787			   get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id);
    788		goto unlock;
    789	}
    790
    791	dbg_cmd = task->dd_data;
    792
    793	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
    794		  "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
    795		  get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
    796		  qedi_conn->iscsi_conn_id);
    797
    798	spin_lock(&qedi_conn->list_lock);
    799	if (likely(dbg_cmd->io_cmd_in_list)) {
    800		dbg_cmd->io_cmd_in_list = false;
    801		list_del_init(&dbg_cmd->io_cmd);
    802		qedi_conn->active_cmd_count--;
    803	}
    804	spin_unlock(&qedi_conn->list_lock);
    805	qedi_cmd->state = CLEANUP_RECV;
    806unlock:
    807	spin_unlock_bh(&conn->session->back_lock);
    808	wake_up_interruptible(&qedi_conn->wait_queue);
    809	return;
    810
    811check_cleanup_reqs:
    812	if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
    813	    qedi_conn->cmd_cleanup_req) {
    814		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
    815			  "Freeing tid=0x%x for cid=0x%x\n",
    816			  cqe->itid, qedi_conn->iscsi_conn_id);
    817		wake_up(&qedi_conn->wait_queue);
    818	}
    819}
    820
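/*
 * Fast-path CQE handler: validate the completion type, look up the owning
 * connection, fail the connection on a data-digest error, and dispatch
 * solicited, unsolicited, dummy and task-cleanup completions to their
 * respective handlers.
 */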
    821void qedi_fp_process_cqes(struct qedi_work *work)
    822{
    823	struct qedi_ctx *qedi = work->qedi;
    824	union iscsi_cqe *cqe = &work->cqe;
    825	struct iscsi_task *task = NULL;
    826	struct iscsi_nopout *nopout_hdr;
    827	struct qedi_conn *q_conn;
    828	struct iscsi_conn *conn;
    829	struct qedi_cmd *qedi_cmd;
    830	u32 comp_type;
    831	u32 iscsi_cid;
    832	u32 hdr_opcode;
    833	u16 que_idx = work->que_idx;
    834	u8 cqe_err_bits = 0;
    835
    836	comp_type = cqe->cqe_common.cqe_type;
    837	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
    838	cqe_err_bits =
    839		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
    840
    841	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
    842		  "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
    843		  cqe->cqe_common.conn_id, comp_type, hdr_opcode);
    844
    845	if (comp_type >= MAX_ISCSI_CQES_TYPE) {
    846		QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
    847		return;
    848	}
    849
    850	iscsi_cid  = cqe->cqe_common.conn_id;
    851	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
    852	if (!q_conn) {
    853		QEDI_WARN(&qedi->dbg_ctx,
    854			  "Session no longer exists for cid=0x%x!!\n",
    855			  iscsi_cid);
    856		return;
    857	}
    858
    859	conn = q_conn->cls_conn->dd_data;
    860
    861	if (unlikely(cqe_err_bits &&
    862		     GET_FIELD(cqe_err_bits,
    863			       CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
    864		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
    865		return;
    866	}
    867
    868	switch (comp_type) {
    869	case ISCSI_CQE_TYPE_SOLICITED:
    870	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
    871		qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
    872		task = qedi_cmd->task;
    873		if (!task) {
    874			QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
    875			return;
    876		}
    877
    878		/* Process NOPIN local completion */
    879		nopout_hdr = (struct iscsi_nopout *)task->hdr;
    880		if ((nopout_hdr->itt == RESERVED_ITT) &&
    881		    (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
    882			qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
    883						      task, q_conn);
    884		} else {
    885			cqe->cqe_solicited.itid =
    886					       qedi_get_itt(cqe->cqe_solicited);
    887			/* Process other solicited responses */
    888			qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
    889		}
    890		break;
    891	case ISCSI_CQE_TYPE_UNSOLICITED:
    892		switch (hdr_opcode) {
    893		case ISCSI_OPCODE_NOP_IN:
    894			qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
    895						que_idx);
    896			break;
    897		case ISCSI_OPCODE_ASYNC_MSG:
    898			qedi_process_async_mesg(qedi, cqe, task, q_conn,
    899						que_idx);
    900			break;
    901		case ISCSI_OPCODE_REJECT:
    902			qedi_process_reject_mesg(qedi, cqe, task, q_conn,
    903						 que_idx);
    904			break;
    905		}
    906		goto exit_fp_process;
    907	case ISCSI_CQE_TYPE_DUMMY:
    908		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
    909		goto exit_fp_process;
    910	case ISCSI_CQE_TYPE_TASK_CLEANUP:
    911		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
    912		qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn);
    913		goto exit_fp_process;
    914	default:
    915		QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
    916		break;
    917	}
    918
    919exit_fp_process:
    920	return;
    921}
    922
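/*
 * Publish the current SQ producer index to the connection's doorbell
 * register so the firmware picks up newly posted WQEs.
 */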
    923static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
    924{
    925	qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
    926
    927	/* wmb - Make sure fw idx is coherent */
    928	wmb();
    929	writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);
    930
     931	/* Make sure the fw write idx is coherent. Keep both memory barriers
     932	 * as a failsafe: on some architectures the two calls are the same, but
     933	 * on others they map to two different assembly operations.
     934	 */
    935	wmb();
    936	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
    937		  "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
    938		  qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
    939		  qedi_conn->iscsi_conn_id);
    940}
    941
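/*
 * Reserve the next SQ slot: return the current producer index and advance
 * both the driver and firmware producer counters, wrapping the driver
 * index at QEDI_SQ_SIZE.
 */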
    942static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
    943{
    944	struct qedi_endpoint *ep;
    945	u16 rval;
    946
    947	ep = qedi_conn->ep;
    948	rval = ep->sq_prod_idx;
    949
     950	/* Increment SQ index */
    951	ep->sq_prod_idx++;
    952	ep->fw_sq_prod_idx++;
    953	if (ep->sq_prod_idx == QEDI_SQ_SIZE)
    954		ep->sq_prod_idx = 0;
    955
    956	return rval;
    957}
    958
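/*
 * Build and post a Login Request WQE: allocate a task id, translate the
 * libiscsi login PDU into the firmware header format, attach the generic
 * PDU request/response SGLs, queue the command on the active list and
 * ring the doorbell.
 */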
    959int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
    960			  struct iscsi_task *task)
    961{
    962	struct iscsi_login_req_hdr login_req_pdu_header;
    963	struct scsi_sgl_task_params tx_sgl_task_params;
    964	struct scsi_sgl_task_params rx_sgl_task_params;
    965	struct iscsi_task_params task_params;
    966	struct iscsi_task_context *fw_task_ctx;
    967	struct qedi_ctx *qedi = qedi_conn->qedi;
    968	struct iscsi_login_req *login_hdr;
    969	struct scsi_sge *resp_sge = NULL;
    970	struct qedi_cmd *qedi_cmd;
    971	struct qedi_endpoint *ep;
    972	s16 tid = 0;
    973	u16 sq_idx = 0;
    974	int rval = 0;
    975
    976	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
    977	qedi_cmd = (struct qedi_cmd *)task->dd_data;
    978	ep = qedi_conn->ep;
    979	login_hdr = (struct iscsi_login_req *)task->hdr;
    980
    981	tid = qedi_get_task_idx(qedi);
    982	if (tid == -1)
    983		return -ENOMEM;
    984
    985	fw_task_ctx =
    986	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
    987							       tid);
    988	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
    989
    990	qedi_cmd->task_id = tid;
    991
    992	memset(&task_params, 0, sizeof(task_params));
    993	memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
    994	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
    995	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
    996	/* Update header info */
    997	login_req_pdu_header.opcode = login_hdr->opcode;
    998	login_req_pdu_header.version_min = login_hdr->min_version;
    999	login_req_pdu_header.version_max = login_hdr->max_version;
   1000	login_req_pdu_header.flags_attr = login_hdr->flags;
   1001	login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
   1002	login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
   1003
   1004	login_req_pdu_header.tsih = login_hdr->tsih;
   1005	login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
   1006
   1007	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
   1008	login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
   1009	login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
   1010	login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
   1011	login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
   1012	login_req_pdu_header.exp_stat_sn = 0;
   1013
   1014	/* Fill tx AHS and rx buffer */
   1015	tx_sgl_task_params.sgl =
   1016			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
   1017	tx_sgl_task_params.sgl_phys_addr.lo =
   1018					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
   1019	tx_sgl_task_params.sgl_phys_addr.hi =
   1020			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
   1021	tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
   1022	tx_sgl_task_params.num_sges = 1;
   1023
   1024	rx_sgl_task_params.sgl =
   1025			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
   1026	rx_sgl_task_params.sgl_phys_addr.lo =
   1027					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
   1028	rx_sgl_task_params.sgl_phys_addr.hi =
   1029			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
   1030	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
   1031	rx_sgl_task_params.num_sges = 1;
   1032
   1033	/* Fill fw input params */
   1034	task_params.context = fw_task_ctx;
   1035	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   1036	task_params.itid = tid;
   1037	task_params.cq_rss_number = 0;
   1038	task_params.tx_io_size = ntoh24(login_hdr->dlength);
   1039	task_params.rx_io_size = resp_sge->sge_len;
   1040
   1041	sq_idx = qedi_get_wqe_idx(qedi_conn);
   1042	task_params.sqe = &ep->sq[sq_idx];
   1043
   1044	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   1045	rval = init_initiator_login_request_task(&task_params,
   1046						 &login_req_pdu_header,
   1047						 &tx_sgl_task_params,
   1048						 &rx_sgl_task_params);
   1049	if (rval)
   1050		return -1;
   1051
   1052	spin_lock(&qedi_conn->list_lock);
   1053	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
   1054	qedi_cmd->io_cmd_in_list = true;
   1055	qedi_conn->active_cmd_count++;
   1056	spin_unlock(&qedi_conn->list_lock);
   1057
   1058	qedi_ring_doorbell(qedi_conn);
   1059	return 0;
   1060}
   1061
   1062int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
   1063			   struct iscsi_task *task)
   1064{
   1065	struct iscsi_logout_req_hdr logout_pdu_header;
   1066	struct scsi_sgl_task_params tx_sgl_task_params;
   1067	struct scsi_sgl_task_params rx_sgl_task_params;
   1068	struct iscsi_task_params task_params;
   1069	struct iscsi_task_context *fw_task_ctx;
   1070	struct iscsi_logout *logout_hdr = NULL;
   1071	struct qedi_ctx *qedi = qedi_conn->qedi;
   1072	struct qedi_cmd *qedi_cmd;
   1073	struct qedi_endpoint *ep;
   1074	s16 tid = 0;
   1075	u16 sq_idx = 0;
   1076	int rval = 0;
   1077
   1078	qedi_cmd = (struct qedi_cmd *)task->dd_data;
   1079	logout_hdr = (struct iscsi_logout *)task->hdr;
   1080	ep = qedi_conn->ep;
   1081
   1082	tid = qedi_get_task_idx(qedi);
   1083	if (tid == -1)
   1084		return -ENOMEM;
   1085
   1086	fw_task_ctx =
   1087	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
   1088							       tid);
   1089	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
   1090
   1091	qedi_cmd->task_id = tid;
   1092
   1093	memset(&task_params, 0, sizeof(task_params));
   1094	memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
   1095	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
   1096	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
   1097
   1098	/* Update header info */
   1099	logout_pdu_header.opcode = logout_hdr->opcode;
   1100	logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
   1101	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
   1102	logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
   1103	logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
   1104	logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
   1105	logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
   1106
   1107	/* Fill fw input params */
   1108	task_params.context = fw_task_ctx;
   1109	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   1110	task_params.itid = tid;
   1111	task_params.cq_rss_number = 0;
   1112	task_params.tx_io_size = 0;
   1113	task_params.rx_io_size = 0;
   1114
   1115	sq_idx = qedi_get_wqe_idx(qedi_conn);
   1116	task_params.sqe = &ep->sq[sq_idx];
   1117	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   1118
   1119	rval = init_initiator_logout_request_task(&task_params,
   1120						  &logout_pdu_header,
   1121						  NULL, NULL);
   1122	if (rval)
   1123		return -1;
   1124
   1125	spin_lock(&qedi_conn->list_lock);
   1126	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
   1127	qedi_cmd->io_cmd_in_list = true;
   1128	qedi_conn->active_cmd_count++;
   1129	spin_unlock(&qedi_conn->list_lock);
   1130
   1131	qedi_ring_doorbell(qedi_conn);
   1132	return 0;
   1133}
   1134
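/*
 * Issue a firmware cleanup for every command on the connection's active
 * list (restricted to the affected LUN for a LUN reset) and wait up to 5
 * seconds for all cleanup completions; if they do not arrive, mark the
 * devices missing, drain the hardware and wait once more before giving up.
 */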
   1135int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
   1136			struct iscsi_task *task, bool in_recovery)
   1137{
   1138	int rval;
   1139	struct iscsi_task *ctask;
   1140	struct qedi_cmd *cmd, *cmd_tmp;
   1141	struct iscsi_tm *tmf_hdr;
   1142	unsigned int lun = 0;
   1143	bool lun_reset = false;
   1144	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
   1145	struct iscsi_session *session = conn->session;
   1146
    1147	/* task is NULL from recovery; a TMF response supplies a valid task */
   1148	if (task) {
   1149		tmf_hdr = (struct iscsi_tm *)task->hdr;
   1150
   1151		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
   1152			ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
   1153			lun_reset = true;
   1154			lun = scsilun_to_int(&tmf_hdr->lun);
   1155		}
   1156	}
   1157
   1158	qedi_conn->cmd_cleanup_req = 0;
   1159	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
   1160
   1161	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1162		  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
   1163		  qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
   1164		  in_recovery, lun_reset);
   1165
   1166	if (lun_reset)
   1167		spin_lock_bh(&session->back_lock);
   1168
   1169	spin_lock(&qedi_conn->list_lock);
   1170
   1171	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
   1172				 io_cmd) {
   1173		ctask = cmd->task;
   1174		if (ctask == task)
   1175			continue;
   1176
   1177		if (lun_reset) {
   1178			if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
   1179				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1180					  "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
   1181					  cmd->task_id, get_itt(ctask->itt),
   1182					  cmd->scsi_cmd, cmd->scsi_cmd->device,
   1183					  ctask->state, cmd->state,
   1184					  qedi_conn->iscsi_conn_id);
   1185				if (cmd->scsi_cmd->device->lun != lun)
   1186					continue;
   1187			}
   1188		}
   1189		qedi_conn->cmd_cleanup_req++;
   1190		qedi_iscsi_cleanup_task(ctask, true);
   1191
   1192		cmd->io_cmd_in_list = false;
   1193		list_del_init(&cmd->io_cmd);
   1194		qedi_conn->active_cmd_count--;
   1195		QEDI_WARN(&qedi->dbg_ctx,
   1196			  "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
   1197			  &cmd->io_cmd, qedi_conn->iscsi_conn_id);
   1198	}
   1199
   1200	spin_unlock(&qedi_conn->list_lock);
   1201
   1202	if (lun_reset)
   1203		spin_unlock_bh(&session->back_lock);
   1204
   1205	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1206		  "cmd_cleanup_req=%d, cid=0x%x\n",
   1207		  qedi_conn->cmd_cleanup_req,
   1208		  qedi_conn->iscsi_conn_id);
   1209
   1210	rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
   1211				(qedi_conn->cmd_cleanup_req ==
   1212				 atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
   1213				test_bit(QEDI_IN_RECOVERY, &qedi->flags),
   1214				5 * HZ);
   1215	if (rval) {
   1216		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1217			  "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
   1218			  qedi_conn->cmd_cleanup_req,
   1219			  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
   1220			  qedi_conn->iscsi_conn_id);
   1221
   1222		return 0;
   1223	}
   1224
   1225	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1226		  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
   1227		  qedi_conn->cmd_cleanup_req,
   1228		  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
   1229		  qedi_conn->iscsi_conn_id);
   1230
   1231	iscsi_host_for_each_session(qedi->shost,
   1232				    qedi_mark_device_missing);
   1233	qedi_ops->common->drain(qedi->cdev);
   1234
    1235	/* Enable IOs for all other sessions except current. */
   1236	if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
   1237				(qedi_conn->cmd_cleanup_req ==
   1238				 atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
   1239				test_bit(QEDI_IN_RECOVERY, &qedi->flags),
   1240				5 * HZ)) {
   1241		iscsi_host_for_each_session(qedi->shost,
   1242					    qedi_mark_device_available);
   1243		return -1;
   1244	}
   1245
   1246	iscsi_host_for_each_session(qedi->shost,
   1247				    qedi_mark_device_available);
   1248
   1249	return 0;
   1250}
   1251
   1252void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
   1253		  struct iscsi_task *task)
   1254{
   1255	struct qedi_endpoint *qedi_ep;
   1256	int rval;
   1257
   1258	qedi_ep = qedi_conn->ep;
   1259	qedi_conn->cmd_cleanup_req = 0;
   1260	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
   1261
   1262	if (!qedi_ep) {
   1263		QEDI_WARN(&qedi->dbg_ctx,
   1264			  "Cannot proceed, ep already disconnected, cid=0x%x\n",
   1265			  qedi_conn->iscsi_conn_id);
   1266		return;
   1267	}
   1268
   1269	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
   1270		  "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
   1271		  qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
   1272
   1273	qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
   1274
   1275	rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
   1276	if (rval) {
   1277		QEDI_ERR(&qedi->dbg_ctx,
   1278			 "fatal error, need hard reset, cid=0x%x\n",
   1279			 qedi_conn->iscsi_conn_id);
   1280		WARN_ON(1);
   1281	}
   1282}
   1283
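/*
 * Wait up to 5 seconds for the firmware cleanup (or the command's own
 * response) to complete; on timeout flag the command so the caller can
 * escalate to connection recovery.
 */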
   1284static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
   1285					 struct qedi_conn *qedi_conn,
   1286					 struct iscsi_task *task,
   1287					 struct qedi_cmd *qedi_cmd,
   1288					 struct qedi_work_map *list_work)
   1289{
   1290	struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
   1291	int wait;
   1292
   1293	wait  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
   1294						 ((qedi_cmd->state ==
   1295						   CLEANUP_RECV) ||
   1296						 ((qedi_cmd->type == TYPEIO) &&
   1297						  (cmd->state ==
   1298						   RESPONSE_RECEIVED))),
   1299						 5 * HZ);
   1300	if (!wait) {
   1301		qedi_cmd->state = CLEANUP_WAIT_FAILED;
   1302
   1303		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
    1304		  "Cleanup timed out tid=0x%x, issue connection recovery, cid=0x%x\n",
   1305			  cmd->task_id, qedi_conn->iscsi_conn_id);
   1306
   1307		return -1;
   1308	}
   1309	return 0;
   1310}
   1311
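/*
 * Deferred ABORT TASK handling: look up the command referenced by the
 * TMF's RTT, request a firmware task cleanup for it and wait for the
 * cleanup completion before sending the TMF request itself (the TMF is
 * still sent if the task already completed, so libiscsi gets a response);
 * the fw_cleanup_works reference is dropped on every exit path.
 */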
   1312static void qedi_abort_work(struct work_struct *work)
   1313{
   1314	struct qedi_cmd *qedi_cmd =
   1315		container_of(work, struct qedi_cmd, tmf_work);
   1316	struct qedi_conn *qedi_conn = qedi_cmd->conn;
   1317	struct qedi_ctx *qedi = qedi_conn->qedi;
   1318	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
   1319	struct qedi_work_map *list_work = NULL;
   1320	struct iscsi_task *mtask;
   1321	struct qedi_cmd *cmd;
   1322	struct iscsi_task *ctask;
   1323	struct iscsi_tm *tmf_hdr;
   1324	s16 rval = 0;
   1325
   1326	mtask = qedi_cmd->task;
   1327	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
   1328
   1329	spin_lock_bh(&conn->session->back_lock);
   1330	ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt);
   1331	if (!ctask) {
   1332		spin_unlock_bh(&conn->session->back_lock);
   1333		QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n");
   1334		goto clear_cleanup;
   1335	}
   1336
   1337	if (iscsi_task_is_completed(ctask)) {
   1338		spin_unlock_bh(&conn->session->back_lock);
   1339		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
   1340			  "Task already completed\n");
   1341		/*
   1342		 * We have to still send the TMF because libiscsi needs the
   1343		 * response to avoid a timeout.
   1344		 */
   1345		goto send_tmf;
   1346	}
   1347	spin_unlock_bh(&conn->session->back_lock);
   1348
   1349	cmd = (struct qedi_cmd *)ctask->dd_data;
   1350	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
   1351		  "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
   1352		  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
   1353		  qedi_conn->iscsi_conn_id);
   1354
   1355	if (qedi_do_not_recover) {
   1356		QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
   1357			 qedi_do_not_recover);
   1358		goto clear_cleanup;
   1359	}
   1360
   1361	list_work = kzalloc(sizeof(*list_work), GFP_NOIO);
   1362	if (!list_work) {
   1363		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
   1364		goto clear_cleanup;
   1365	}
   1366
   1367	qedi_cmd->type = TYPEIO;
   1368	qedi_cmd->state = CLEANUP_WAIT;
   1369	list_work->qedi_cmd = qedi_cmd;
   1370	list_work->rtid = cmd->task_id;
   1371	list_work->state = QEDI_WORK_SCHEDULED;
   1372	list_work->ctask = ctask;
   1373	qedi_cmd->list_tmf_work = list_work;
   1374
   1375	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   1376		  "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
   1377		  list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
   1378		  tmf_hdr->flags);
   1379
   1380	spin_lock_bh(&qedi_conn->tmf_work_lock);
   1381	list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
   1382	spin_unlock_bh(&qedi_conn->tmf_work_lock);
   1383
   1384	qedi_iscsi_cleanup_task(ctask, false);
   1385
   1386	rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
   1387					     list_work);
   1388	if (rval == -1) {
   1389		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
   1390			  "FW cleanup got escalated, cid=0x%x\n",
   1391			  qedi_conn->iscsi_conn_id);
   1392		goto ldel_exit;
   1393	}
   1394
   1395send_tmf:
   1396	send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
   1397	goto clear_cleanup;
   1398
   1399ldel_exit:
   1400	spin_lock_bh(&qedi_conn->tmf_work_lock);
   1401	if (qedi_cmd->list_tmf_work) {
   1402		list_del_init(&list_work->list);
   1403		qedi_cmd->list_tmf_work = NULL;
   1404		kfree(list_work);
   1405	}
   1406	spin_unlock_bh(&qedi_conn->tmf_work_lock);
   1407
   1408	spin_lock(&qedi_conn->list_lock);
   1409	if (likely(cmd->io_cmd_in_list)) {
   1410		cmd->io_cmd_in_list = false;
   1411		list_del_init(&cmd->io_cmd);
   1412		qedi_conn->active_cmd_count--;
   1413	}
   1414	spin_unlock(&qedi_conn->list_lock);
   1415
   1416clear_cleanup:
   1417	spin_lock(&qedi_conn->tmf_work_lock);
   1418	qedi_conn->fw_cleanup_works--;
   1419	spin_unlock(&qedi_conn->tmf_work_lock);
   1420}
   1421
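/*
 * Build and post a TMF Request WQE for the given management task. For
 * ABORT TASK the referenced task tag is rebuilt from the aborted command's
 * task id; other TMF functions use the reserved tag.
 */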
   1422static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
   1423			  struct iscsi_task *ctask)
   1424{
   1425	struct iscsi_tmf_request_hdr tmf_pdu_header;
   1426	struct iscsi_task_params task_params;
   1427	struct qedi_ctx *qedi = qedi_conn->qedi;
   1428	struct iscsi_task_context *fw_task_ctx;
   1429	struct iscsi_tm *tmf_hdr;
   1430	struct qedi_cmd *qedi_cmd;
   1431	struct qedi_cmd *cmd;
   1432	struct qedi_endpoint *ep;
   1433	u32 scsi_lun[2];
   1434	s16 tid = 0;
   1435	u16 sq_idx = 0;
   1436
   1437	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
   1438	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
   1439	ep = qedi_conn->ep;
   1440	if (!ep)
   1441		return -ENODEV;
   1442
   1443	tid = qedi_get_task_idx(qedi);
   1444	if (tid == -1)
   1445		return -ENOMEM;
   1446
   1447	fw_task_ctx =
   1448	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
   1449							       tid);
   1450	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
   1451
   1452	qedi_cmd->task_id = tid;
   1453
   1454	memset(&task_params, 0, sizeof(task_params));
   1455	memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
   1456
   1457	/* Update header info */
   1458	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
   1459	tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
   1460	tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
   1461
   1462	memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
   1463	tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
   1464	tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
   1465
   1466	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
   1467	     ISCSI_TM_FUNC_ABORT_TASK) {
   1468		cmd = (struct qedi_cmd *)ctask->dd_data;
   1469		tmf_pdu_header.rtt =
   1470				qedi_set_itt(cmd->task_id,
   1471					     get_itt(tmf_hdr->rtt));
   1472	} else {
   1473		tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
   1474	}
   1475
   1476	tmf_pdu_header.opcode = tmf_hdr->opcode;
   1477	tmf_pdu_header.function = tmf_hdr->flags;
   1478	tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
   1479	tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
   1480
   1481	/* Fill fw input params */
   1482	task_params.context = fw_task_ctx;
   1483	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   1484	task_params.itid = tid;
   1485	task_params.cq_rss_number = 0;
   1486	task_params.tx_io_size = 0;
   1487	task_params.rx_io_size = 0;
   1488
   1489	sq_idx = qedi_get_wqe_idx(qedi_conn);
   1490	task_params.sqe = &ep->sq[sq_idx];
   1491
   1492	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   1493	init_initiator_tmf_request_task(&task_params, &tmf_pdu_header);
   1494
   1495	spin_lock(&qedi_conn->list_lock);
   1496	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
   1497	qedi_cmd->io_cmd_in_list = true;
   1498	qedi_conn->active_cmd_count++;
   1499	spin_unlock(&qedi_conn->list_lock);
   1500
   1501	qedi_ring_doorbell(qedi_conn);
   1502	return 0;
   1503}
   1504
   1505int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask)
   1506{
   1507	struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr;
   1508	struct qedi_cmd *qedi_cmd = mtask->dd_data;
   1509	struct qedi_ctx *qedi = qedi_conn->qedi;
   1510	int rc = 0;
   1511
   1512	switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
   1513	case ISCSI_TM_FUNC_ABORT_TASK:
   1514		spin_lock(&qedi_conn->tmf_work_lock);
   1515		qedi_conn->fw_cleanup_works++;
   1516		spin_unlock(&qedi_conn->tmf_work_lock);
   1517
   1518		INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work);
   1519		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
   1520		break;
   1521	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
   1522	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
   1523	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
   1524		rc = send_iscsi_tmf(qedi_conn, mtask, NULL);
   1525		break;
   1526	default:
   1527		QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
   1528			 qedi_conn->iscsi_conn_id);
   1529		return -EINVAL;
   1530	}
   1531
   1532	return rc;
   1533}
   1534
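        /*
         * qedi_send_iscsi_text - post an iSCSI Text request.
         *
         * The generic PDU request and response buffers are passed as
         * single-entry tx/rx SGLs and the task is added to the active
         * command list before the doorbell is rung.
         */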
   1535int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
   1536			 struct iscsi_task *task)
   1537{
   1538	struct iscsi_text_request_hdr text_request_pdu_header;
   1539	struct scsi_sgl_task_params tx_sgl_task_params;
   1540	struct scsi_sgl_task_params rx_sgl_task_params;
   1541	struct iscsi_task_params task_params;
   1542	struct iscsi_task_context *fw_task_ctx;
   1543	struct qedi_ctx *qedi = qedi_conn->qedi;
   1544	struct iscsi_text *text_hdr;
   1545	struct scsi_sge *req_sge = NULL;
   1546	struct scsi_sge *resp_sge = NULL;
   1547	struct qedi_cmd *qedi_cmd;
   1548	struct qedi_endpoint *ep;
   1549	s16 tid = 0;
   1550	u16 sq_idx = 0;
   1551	int rval = 0;
   1552
   1553	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
   1554	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
   1555	qedi_cmd = (struct qedi_cmd *)task->dd_data;
   1556	text_hdr = (struct iscsi_text *)task->hdr;
   1557	ep = qedi_conn->ep;
   1558
   1559	tid = qedi_get_task_idx(qedi);
   1560	if (tid == -1)
   1561		return -ENOMEM;
   1562
   1563	fw_task_ctx =
   1564	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
   1565							       tid);
   1566	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
   1567
   1568	qedi_cmd->task_id = tid;
   1569
   1570	memset(&task_params, 0, sizeof(task_params));
   1571	memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
   1572	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
   1573	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
   1574
   1575	/* Update header info */
   1576	text_request_pdu_header.opcode = text_hdr->opcode;
   1577	text_request_pdu_header.flags_attr = text_hdr->flags;
   1578
   1579	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
   1580	text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
   1581	text_request_pdu_header.ttt = text_hdr->ttt;
   1582	text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
   1583	text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
   1584	text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
   1585
   1586	/* Fill tx AHS and rx buffer */
   1587	tx_sgl_task_params.sgl =
   1588			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
   1589	tx_sgl_task_params.sgl_phys_addr.lo =
   1590					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
   1591	tx_sgl_task_params.sgl_phys_addr.hi =
   1592			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
   1593	tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
   1594	tx_sgl_task_params.num_sges = 1;
   1595
   1596	rx_sgl_task_params.sgl =
   1597			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
   1598	rx_sgl_task_params.sgl_phys_addr.lo =
   1599					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
   1600	rx_sgl_task_params.sgl_phys_addr.hi =
   1601			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
   1602	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
   1603	rx_sgl_task_params.num_sges = 1;
   1604
   1605	/* Fill fw input params */
   1606	task_params.context = fw_task_ctx;
   1607	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   1608	task_params.itid = tid;
   1609	task_params.cq_rss_number = 0;
   1610	task_params.tx_io_size = ntoh24(text_hdr->dlength);
   1611	task_params.rx_io_size = resp_sge->sge_len;
   1612
   1613	sq_idx = qedi_get_wqe_idx(qedi_conn);
   1614	task_params.sqe = &ep->sq[sq_idx];
   1615
   1616	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   1617	rval = init_initiator_text_request_task(&task_params,
   1618						&text_request_pdu_header,
   1619						&tx_sgl_task_params,
   1620						&rx_sgl_task_params);
   1621	if (rval)
   1622		return -1;
   1623
   1624	spin_lock(&qedi_conn->list_lock);
   1625	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
   1626	qedi_cmd->io_cmd_in_list = true;
   1627	qedi_conn->active_cmd_count++;
   1628	spin_unlock(&qedi_conn->list_lock);
   1629
   1630	qedi_ring_doorbell(qedi_conn);
   1631	return 0;
   1632}
   1633
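        /*
         * qedi_send_iscsi_nopout - post a NOP-Out PDU.
         *
         * An initiator-generated ping (TTT == all ones) gets a driver ITT and
         * is tracked on the active command list; a reply to a target NOP-In
         * reuses the ITT/TTT from the header and is not tracked. Optional
         * ping data is sent through the generic PDU request buffer.
         */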
   1634int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
   1635			   struct iscsi_task *task,
   1636			   char *datap, int data_len, int unsol)
   1637{
   1638	struct iscsi_nop_out_hdr nop_out_pdu_header;
   1639	struct scsi_sgl_task_params tx_sgl_task_params;
   1640	struct scsi_sgl_task_params rx_sgl_task_params;
   1641	struct iscsi_task_params task_params;
   1642	struct qedi_ctx *qedi = qedi_conn->qedi;
   1643	struct iscsi_task_context *fw_task_ctx;
   1644	struct iscsi_nopout *nopout_hdr;
   1645	struct scsi_sge *resp_sge = NULL;
   1646	struct qedi_cmd *qedi_cmd;
   1647	struct qedi_endpoint *ep;
   1648	u32 scsi_lun[2];
   1649	s16 tid = 0;
   1650	u16 sq_idx = 0;
   1651	int rval = 0;
   1652
   1653	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
   1654	qedi_cmd = (struct qedi_cmd *)task->dd_data;
   1655	nopout_hdr = (struct iscsi_nopout *)task->hdr;
   1656	ep = qedi_conn->ep;
   1657
   1658	tid = qedi_get_task_idx(qedi);
   1659	if (tid == -1)
   1660		return -ENOMEM;
   1661
   1662	fw_task_ctx =
   1663	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
   1664							       tid);
   1665	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
   1666
   1667	qedi_cmd->task_id = tid;
   1668
   1669	memset(&task_params, 0, sizeof(task_params));
   1670	memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
   1671	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
   1672	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
   1673
   1674	/* Update header info */
   1675	nop_out_pdu_header.opcode = nopout_hdr->opcode;
   1676	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
   1677	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
   1678
   1679	memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
   1680	nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
   1681	nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
   1682	nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
   1683	nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
   1684
   1685	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
   1686
   1687	if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
   1688		nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
   1689		nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
   1690	} else {
   1691		nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
   1692		nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
   1693
   1694		spin_lock(&qedi_conn->list_lock);
   1695		list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
   1696		qedi_cmd->io_cmd_in_list = true;
   1697		qedi_conn->active_cmd_count++;
   1698		spin_unlock(&qedi_conn->list_lock);
   1699	}
   1700
   1701	/* Fill tx AHS and rx buffer */
   1702	if (data_len) {
   1703		tx_sgl_task_params.sgl =
   1704			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
   1705		tx_sgl_task_params.sgl_phys_addr.lo =
   1706					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
   1707		tx_sgl_task_params.sgl_phys_addr.hi =
   1708			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
   1709		tx_sgl_task_params.total_buffer_size = data_len;
   1710		tx_sgl_task_params.num_sges = 1;
   1711
   1712		rx_sgl_task_params.sgl =
   1713			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
   1714		rx_sgl_task_params.sgl_phys_addr.lo =
   1715					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
   1716		rx_sgl_task_params.sgl_phys_addr.hi =
   1717			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
   1718		rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
   1719		rx_sgl_task_params.num_sges = 1;
   1720	}
   1721
   1722	/* Fill fw input params */
   1723	task_params.context = fw_task_ctx;
   1724	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   1725	task_params.itid = tid;
   1726	task_params.cq_rss_number = 0;
   1727	task_params.tx_io_size = data_len;
   1728	task_params.rx_io_size = resp_sge->sge_len;
   1729
   1730	sq_idx = qedi_get_wqe_idx(qedi_conn);
   1731	task_params.sqe = &ep->sq[sq_idx];
   1732
   1733	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   1734	rval = init_initiator_nop_out_task(&task_params,
   1735					   &nop_out_pdu_header,
   1736					   &tx_sgl_task_params,
   1737					   &rx_sgl_task_params);
   1738	if (rval)
   1739		return -1;
   1740
   1741	qedi_ring_doorbell(qedi_conn);
   1742	return 0;
   1743}
   1744
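        /*
         * qedi_split_bd - split one DMA segment into SGEs no larger than
         * QEDI_BD_SPLIT_SZ, starting at @bd_index in the command's SGE table.
         * Returns the number of SGEs written.
         */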
   1745static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
   1746			 int bd_index)
   1747{
   1748	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
   1749	int frag_size, sg_frags;
   1750
   1751	sg_frags = 0;
   1752
   1753	while (sg_len) {
   1754		if (addr % QEDI_PAGE_SIZE)
   1755			frag_size =
   1756				   (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
   1757		else
   1758			frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
   1759				    (sg_len % QEDI_BD_SPLIT_SZ);
   1760
   1761		if (frag_size == 0)
   1762			frag_size = QEDI_BD_SPLIT_SZ;
   1763
   1764		bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
   1765		bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
   1766		bd[bd_index + sg_frags].sge_len = (u16)frag_size;
   1767		QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
   1768			  "split sge %d: addr=%llx, len=%x\n",
   1769			  (bd_index + sg_frags), addr, frag_size);
   1770
   1771		addr += (u64)frag_size;
   1772		sg_frags++;
   1773		sg_len -= frag_size;
   1774	}
   1775	return sg_frags;
   1776}
   1777
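        /*
         * qedi_map_scsi_sg - DMA-map the scatterlist and build the SGE table.
         *
         * A single segment of at most MAX_SGLEN_FOR_CACHESGL is sent as a
         * cached SGL. Otherwise segments larger than QEDI_BD_SPLIT_SZ are
         * split, and segments that are not page aligned at the relevant end
         * force the slow path (cmd->use_slowpath). Returns the SGE count.
         */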
   1778static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
   1779{
   1780	struct scsi_cmnd *sc = cmd->scsi_cmd;
   1781	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
   1782	struct scatterlist *sg;
   1783	int byte_count = 0;
   1784	int bd_count = 0;
   1785	int sg_count;
   1786	int sg_len;
   1787	int sg_frags;
   1788	u64 addr, end_addr;
   1789	int i;
   1790
   1791	WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
   1792
   1793	sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
   1794			      scsi_sg_count(sc), sc->sc_data_direction);
   1795
   1796	/*
   1797	 * Send a single SGE as a cached SGL when the command maps to
   1798	 * exactly one SGE with length less than 64K.
   1799	 */
   1800	sg = scsi_sglist(sc);
   1801	if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
   1802		sg_len = sg_dma_len(sg);
   1803		addr = (u64)sg_dma_address(sg);
   1804
   1805		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
   1806		bd[bd_count].sge_addr.hi = (addr >> 32);
   1807		bd[bd_count].sge_len = (u16)sg_len;
   1808
   1809		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
   1810			  "single-cached-sgl: bd_count:%d addr=%llx, len=%x\n",
   1811			  sg_count, addr, sg_len);
   1812
   1813		return ++bd_count;
   1814	}
   1815
   1816	scsi_for_each_sg(sc, sg, sg_count, i) {
   1817		sg_len = sg_dma_len(sg);
   1818		addr = (u64)sg_dma_address(sg);
   1819		end_addr = (addr + sg_len);
   1820
   1821		/*
   1822		 * first sg elem in the 'list',
   1823		 * check if end addr is page-aligned.
   1824		 */
   1825		if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
   1826			cmd->use_slowpath = true;
   1827
   1828		/*
   1829		 * last sg elem in the 'list',
   1830		 * check if start addr is page-aligned.
   1831		 */
   1832		else if ((i == (sg_count - 1)) &&
   1833			 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
   1834			cmd->use_slowpath = true;
   1835
   1836		/*
   1837		 * middle sg elements in the list,
   1838		 * check if start and end addr are page-aligned.
   1839		 */
   1840		else if ((i != 0) && (i != (sg_count - 1)) &&
   1841			 ((addr % QEDI_PAGE_SIZE) ||
   1842			 (end_addr % QEDI_PAGE_SIZE)))
   1843			cmd->use_slowpath = true;
   1844
   1845		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x\n",
   1846			  i, sg_len);
   1847
   1848		if (sg_len > QEDI_BD_SPLIT_SZ) {
   1849			sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
   1850		} else {
   1851			sg_frags = 1;
   1852			bd[bd_count].sge_addr.lo = addr & 0xffffffff;
   1853			bd[bd_count].sge_addr.hi = addr >> 32;
   1854			bd[bd_count].sge_len = sg_len;
   1855		}
   1856		byte_count += sg_len;
   1857		bd_count += sg_frags;
   1858	}
   1859
   1860	if (byte_count != scsi_bufflen(sc))
   1861		QEDI_ERR(&qedi->dbg_ctx,
   1862			 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
   1863			 scsi_bufflen(sc));
   1864	else
   1865		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
   1866			  byte_count);
   1867
   1868	WARN_ON(byte_count != scsi_bufflen(sc));
   1869
   1870	return bd_count;
   1871}
   1872
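        /*
         * qedi_iscsi_map_sg_list - fill cmd->io_tbl for the SCSI command;
         * commands without data get a single zeroed SGE and sge_valid = 0.
         */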
   1873static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
   1874{
   1875	int bd_count;
   1876	struct scsi_cmnd *sc = cmd->scsi_cmd;
   1877
   1878	if (scsi_sg_count(sc)) {
   1879		bd_count  = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
   1880		if (bd_count == 0)
   1881			return;
   1882	} else {
   1883		struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
   1884
   1885		bd[0].sge_addr.lo = 0;
   1886		bd[0].sge_addr.hi = 0;
   1887		bd[0].sge_len = 0;
   1888		bd_count = 0;
   1889	}
   1890	cmd->io_tbl.sge_valid = bd_count;
   1891}
   1892
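        /*
         * qedi_cpy_scsi_cdb - copy the CDB into the task context as
         * big-endian 32-bit words; a trailing partial word, if any,
         * is handled after the loop.
         */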
   1893static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
   1894{
   1895	u32 dword;
   1896	int lpcnt;
   1897	u8 *srcp;
   1898
   1899	lpcnt = sc->cmd_len / sizeof(dword);
   1900	srcp = (u8 *)sc->cmnd;
   1901	while (lpcnt--) {
   1902		memcpy(&dword, (const void *)srcp, 4);
   1903		*dstp = cpu_to_be32(dword);
   1904		srcp += 4;
   1905		dstp++;
   1906	}
   1907	if (sc->cmd_len & 0x3) {
   1908		dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
   1909		*dstp = cpu_to_be32(dword);
   1910	}
   1911}
   1912
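        /*
         * qedi_trace_io - record one entry in the circular I/O trace buffer
         * (opcode, CDB bytes 2-5, SGL statistics and the CPUs involved)
         * under io_trace_lock.
         */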
   1913void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
   1914		   u16 tid, int8_t direction)
   1915{
   1916	struct qedi_io_log *io_log;
   1917	struct iscsi_conn *conn = task->conn;
   1918	struct qedi_conn *qedi_conn = conn->dd_data;
   1919	struct scsi_cmnd *sc_cmd = task->sc;
   1920	unsigned long flags;
   1921
   1922	spin_lock_irqsave(&qedi->io_trace_lock, flags);
   1923
   1924	io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
   1925	io_log->direction = direction;
   1926	io_log->task_id = tid;
   1927	io_log->cid = qedi_conn->iscsi_conn_id;
   1928	io_log->lun = sc_cmd->device->lun;
   1929	io_log->op = sc_cmd->cmnd[0];
   1930	io_log->lba[0] = sc_cmd->cmnd[2];
   1931	io_log->lba[1] = sc_cmd->cmnd[3];
   1932	io_log->lba[2] = sc_cmd->cmnd[4];
   1933	io_log->lba[3] = sc_cmd->cmnd[5];
   1934	io_log->bufflen = scsi_bufflen(sc_cmd);
   1935	io_log->sg_count = scsi_sg_count(sc_cmd);
   1936	io_log->fast_sgs = qedi->fast_sgls;
   1937	io_log->cached_sgs = qedi->cached_sgls;
   1938	io_log->slow_sgs = qedi->slow_sgls;
   1939	io_log->cached_sge = qedi->use_cached_sge;
   1940	io_log->slow_sge = qedi->use_slow_sge;
   1941	io_log->fast_sge = qedi->use_fast_sge;
   1942	io_log->result = sc_cmd->result;
   1943	io_log->jiffies = jiffies;
   1944	io_log->blk_req_cpu = smp_processor_id();
   1945
   1946	if (direction == QEDI_IO_TRACE_REQ) {
   1947		/* For requests we only care about the submission CPU */
   1948		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
   1949		io_log->intr_cpu = 0;
   1950		io_log->blk_rsp_cpu = 0;
   1951	} else if (direction == QEDI_IO_TRACE_RSP) {
   1952		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
   1953		io_log->intr_cpu = qedi->intr_cpu;
   1954		io_log->blk_rsp_cpu = smp_processor_id();
   1955	}
   1956
   1957	qedi->io_trace_idx++;
   1958	if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
   1959		qedi->io_trace_idx = 0;
   1960
   1961	qedi->use_cached_sge = false;
   1962	qedi->use_slow_sge = false;
   1963	qedi->use_fast_sge = false;
   1964
   1965	spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
   1966}
   1967
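        /*
         * qedi_iscsi_send_ioreq - build and post a SCSI command task.
         *
         * Maps the data buffer, fills the command PDU header plus connection
         * and command parameters, picks the CQ from the submitting CPU,
         * initializes the read/write task context, adds the command to the
         * active list and rings the doorbell.
         */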
   1968int qedi_iscsi_send_ioreq(struct iscsi_task *task)
   1969{
   1970	struct iscsi_conn *conn = task->conn;
   1971	struct iscsi_session *session = conn->session;
   1972	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
   1973	struct qedi_ctx *qedi = iscsi_host_priv(shost);
   1974	struct qedi_conn *qedi_conn = conn->dd_data;
   1975	struct qedi_cmd *cmd = task->dd_data;
   1976	struct scsi_cmnd *sc = task->sc;
   1977	struct iscsi_cmd_hdr cmd_pdu_header;
   1978	struct scsi_sgl_task_params tx_sgl_task_params;
   1979	struct scsi_sgl_task_params rx_sgl_task_params;
   1980	struct scsi_sgl_task_params *prx_sgl = NULL;
   1981	struct scsi_sgl_task_params *ptx_sgl = NULL;
   1982	struct iscsi_task_params task_params;
   1983	struct iscsi_conn_params conn_params;
   1984	struct scsi_initiator_cmd_params cmd_params;
   1985	struct iscsi_task_context *fw_task_ctx;
   1986	struct iscsi_cls_conn *cls_conn;
   1987	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
   1988	enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
   1989	struct qedi_endpoint *ep;
   1990	u32 scsi_lun[2];
   1991	s16 tid = 0;
   1992	u16 sq_idx = 0;
   1993	u16 cq_idx;
   1994	int rval = 0;
   1995
   1996	ep = qedi_conn->ep;
   1997	cls_conn = qedi_conn->cls_conn;
   1998	conn = cls_conn->dd_data;
   1999
   2000	qedi_iscsi_map_sg_list(cmd);
   2001	int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
   2002
   2003	tid = qedi_get_task_idx(qedi);
   2004	if (tid == -1)
   2005		return -ENOMEM;
   2006
   2007	fw_task_ctx =
   2008	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
   2009							       tid);
   2010	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
   2011
   2012	cmd->task_id = tid;
   2013
   2014	memset(&task_params, 0, sizeof(task_params));
   2015	memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
   2016	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
   2017	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
   2018	memset(&conn_params, 0, sizeof(conn_params));
   2019	memset(&cmd_params, 0, sizeof(cmd_params));
   2020
   2021	cq_idx = smp_processor_id() % qedi->num_queues;
   2022	/* Update header info */
   2023	SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
   2024		  ISCSI_ATTR_SIMPLE);
   2025	if (hdr->cdb[0] != TEST_UNIT_READY) {
   2026		if (sc->sc_data_direction == DMA_TO_DEVICE) {
   2027			SET_FIELD(cmd_pdu_header.flags_attr,
   2028				  ISCSI_CMD_HDR_WRITE, 1);
   2029			task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
   2030		} else {
   2031			SET_FIELD(cmd_pdu_header.flags_attr,
   2032				  ISCSI_CMD_HDR_READ, 1);
   2033			task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
   2034		}
   2035	}
   2036
   2037	cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
   2038	cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
   2039
   2040	qedi_update_itt_map(qedi, tid, task->itt, cmd);
   2041	cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
   2042	cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
   2043	cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
   2044	cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
   2045	cmd_pdu_header.hdr_first_byte = hdr->opcode;
   2046	qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
   2047
   2048	/* Fill tx AHS and rx buffer */
   2049	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
   2050		tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
   2051		tx_sgl_task_params.sgl_phys_addr.lo =
   2052						 (u32)(cmd->io_tbl.sge_tbl_dma);
   2053		tx_sgl_task_params.sgl_phys_addr.hi =
   2054				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
   2055		tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
   2056		tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
   2057		if (cmd->use_slowpath)
   2058			tx_sgl_task_params.small_mid_sge = true;
   2059	} else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
   2060		rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
   2061		rx_sgl_task_params.sgl_phys_addr.lo =
   2062						 (u32)(cmd->io_tbl.sge_tbl_dma);
   2063		rx_sgl_task_params.sgl_phys_addr.hi =
   2064				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
   2065		rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
   2066		rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
   2067	}
   2068
   2069	/* Add conn param */
   2070	conn_params.first_burst_length = conn->session->first_burst;
   2071	conn_params.max_send_pdu_length = conn->max_xmit_dlength;
   2072	conn_params.max_burst_length = conn->session->max_burst;
   2073	if (conn->session->initial_r2t_en)
   2074		conn_params.initial_r2t = true;
   2075	if (conn->session->imm_data_en)
   2076		conn_params.immediate_data = true;
   2077
   2078	/* Add cmd params */
   2079	cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
   2080	cmd_params.sense_data_buffer_phys_addr.hi =
   2081					(u32)((u64)cmd->sense_buffer_dma >> 32);
   2082	/* Fill fw input params */
   2083	task_params.context = fw_task_ctx;
   2084	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
   2085	task_params.itid = tid;
   2086	task_params.cq_rss_number = cq_idx;
   2087	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
   2088		task_params.tx_io_size = scsi_bufflen(sc);
   2089	else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
   2090		task_params.rx_io_size = scsi_bufflen(sc);
   2091
   2092	sq_idx = qedi_get_wqe_idx(qedi_conn);
   2093	task_params.sqe = &ep->sq[sq_idx];
   2094
   2095	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
   2096		  "%s: %s-SGL: num_sges=0x%x sg_len=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
   2097		  (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
   2098		  "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
   2099		  "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
   2100		  (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
   2101		  (u32)(cmd->io_tbl.sge_tbl_dma),
   2102		  (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
   2103
   2104	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   2105
   2106	if (task_params.tx_io_size != 0)
   2107		ptx_sgl = &tx_sgl_task_params;
   2108	if (task_params.rx_io_size != 0)
   2109		prx_sgl = &rx_sgl_task_params;
   2110
   2111	rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
   2112					    &cmd_params, &cmd_pdu_header,
   2113					    ptx_sgl, prx_sgl,
   2114					    NULL);
   2115	if (rval)
   2116		return -1;
   2117
   2118	spin_lock(&qedi_conn->list_lock);
   2119	list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
   2120	cmd->io_cmd_in_list = true;
   2121	qedi_conn->active_cmd_count++;
   2122	spin_unlock(&qedi_conn->list_lock);
   2123
   2124	qedi_ring_doorbell(qedi_conn);
   2125	return 0;
   2126}
   2127
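        /*
         * qedi_iscsi_cleanup_task - post a task cleanup request for
         * cmd->task_id on the connection SQ.
         */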
   2128int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
   2129{
   2130	struct iscsi_task_params task_params;
   2131	struct qedi_endpoint *ep;
   2132	struct iscsi_conn *conn = task->conn;
   2133	struct qedi_conn *qedi_conn = conn->dd_data;
   2134	struct qedi_cmd *cmd = task->dd_data;
   2135	u16 sq_idx = 0;
   2136	int rval = 0;
   2137
   2138	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
   2139		  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
   2140		  cmd->task_id, get_itt(task->itt), task->state,
   2141		  cmd->state, qedi_conn->iscsi_conn_id);
   2142
   2143	memset(&task_params, 0, sizeof(task_params));
   2144	ep = qedi_conn->ep;
   2145
   2146	sq_idx = qedi_get_wqe_idx(qedi_conn);
   2147
   2148	task_params.sqe = &ep->sq[sq_idx];
   2149	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
   2150	task_params.itid = cmd->task_id;
   2151
   2152	rval = init_cleanup_task(&task_params);
   2153	if (rval)
   2154		return rval;
   2155
   2156	qedi_ring_doorbell(qedi_conn);
   2157	return 0;
   2158}