cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qedf_els.c (28408B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"

/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on els request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;
	struct qedf_rport *fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"ELS completion xid=0x%x after flush event=0x%x",
			els_req->xid, els_req->event);
		return;
	}

	fcport = els_req->fcport;

	/* When flush is active,
	 * let the cmds be completed from the cleanup context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
		test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"Dropping ELS completion xid=0x%x as fcport is flushing",
			els_req->xid);
		return;
	}

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
		goto out_free;
	}

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send a RRQ to make sure that refcount isn't
	 * 0
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return;
	}

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
		    fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		mutex_unlock(&lport->disc.disc_mutex);
		if (rdata)
			fc_rport_login(rdata);
		fcport->rdata = rdata;
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
			 els_req->xid);
		goto free_arg;
	}

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}

static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "cqe is NULL or timeout event (0x%x)", io_req->event);
		goto free;
	}

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from  orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}


static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
			 orig_io_req, orig_io_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if (!rjt) {
			QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
			goto out_free_frame;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{

	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
	   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
	   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}