cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iscsi_target_util.c (34963B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*******************************************************************************
      3 * This file contains the iSCSI Target specific utility functions.
      4 *
      5 * (c) Copyright 2007-2013 Datera, Inc.
      6 *
      7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
      8 *
      9 ******************************************************************************/
     10
     11#include <linux/list.h>
     12#include <linux/sched/signal.h>
     13#include <net/ipv6.h>         /* ipv6_addr_equal() */
     14#include <scsi/scsi_tcq.h>
     15#include <scsi/iscsi_proto.h>
     16#include <target/target_core_base.h>
     17#include <target/target_core_fabric.h>
     18#include <target/iscsi/iscsi_transport.h>
     19
     20#include <target/iscsi/iscsi_target_core.h>
     21#include "iscsi_target_parameters.h"
     22#include "iscsi_target_seq_pdu_list.h"
     23#include "iscsi_target_datain_values.h"
     24#include "iscsi_target_erl0.h"
     25#include "iscsi_target_erl1.h"
     26#include "iscsi_target_erl2.h"
     27#include "iscsi_target_tpg.h"
     28#include "iscsi_target_util.h"
     29#include "iscsi_target.h"
     30
     31extern struct list_head g_tiqn_list;
     32extern spinlock_t tiqn_lock;
     33
     34int iscsit_add_r2t_to_list(
     35	struct iscsit_cmd *cmd,
     36	u32 offset,
     37	u32 xfer_len,
     38	int recovery,
     39	u32 r2t_sn)
     40{
     41	struct iscsi_r2t *r2t;
     42
     43	lockdep_assert_held(&cmd->r2t_lock);
     44
     45	WARN_ON_ONCE((s32)xfer_len < 0);
     46
     47	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
     48	if (!r2t) {
     49		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
     50		return -1;
     51	}
     52	INIT_LIST_HEAD(&r2t->r2t_list);
     53
     54	r2t->recovery_r2t = recovery;
     55	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
     56	r2t->offset = offset;
     57	r2t->xfer_len = xfer_len;
     58	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
     59	spin_unlock_bh(&cmd->r2t_lock);
     60
     61	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
     62
     63	spin_lock_bh(&cmd->r2t_lock);
     64	return 0;
     65}
     66
     67struct iscsi_r2t *iscsit_get_r2t_for_eos(
     68	struct iscsit_cmd *cmd,
     69	u32 offset,
     70	u32 length)
     71{
     72	struct iscsi_r2t *r2t;
     73
     74	spin_lock_bh(&cmd->r2t_lock);
     75	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
     76		if ((r2t->offset <= offset) &&
     77		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
     78			spin_unlock_bh(&cmd->r2t_lock);
     79			return r2t;
     80		}
     81	}
     82	spin_unlock_bh(&cmd->r2t_lock);
     83
     84	pr_err("Unable to locate R2T for Offset: %u, Length:"
     85			" %u\n", offset, length);
     86	return NULL;
     87}
     88
     89struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
     90{
     91	struct iscsi_r2t *r2t;
     92
     93	spin_lock_bh(&cmd->r2t_lock);
     94	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
     95		if (!r2t->sent_r2t) {
     96			spin_unlock_bh(&cmd->r2t_lock);
     97			return r2t;
     98		}
     99	}
    100	spin_unlock_bh(&cmd->r2t_lock);
    101
    102	pr_err("Unable to locate next R2T to send for ITT:"
    103			" 0x%08x.\n", cmd->init_task_tag);
    104	return NULL;
    105}
    106
    107void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
    108{
    109	lockdep_assert_held(&cmd->r2t_lock);
    110
    111	list_del(&r2t->r2t_list);
    112	kmem_cache_free(lio_r2t_cache, r2t);
    113}
    114
    115void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
    116{
    117	struct iscsi_r2t *r2t, *r2t_tmp;
    118
    119	spin_lock_bh(&cmd->r2t_lock);
    120	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
    121		iscsit_free_r2t(r2t, cmd);
    122	spin_unlock_bh(&cmd->r2t_lock);
    123}
    124
    125static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
    126{
    127	int tag = -1;
    128	DEFINE_SBQ_WAIT(wait);
    129	struct sbq_wait_state *ws;
    130	struct sbitmap_queue *sbq;
    131
    132	if (state == TASK_RUNNING)
    133		return tag;
    134
    135	sbq = &se_sess->sess_tag_pool;
    136	ws = &sbq->ws[0];
    137	for (;;) {
    138		sbitmap_prepare_to_wait(sbq, ws, &wait, state);
    139		if (signal_pending_state(state, current))
    140			break;
    141		tag = sbitmap_queue_get(sbq, cpup);
    142		if (tag >= 0)
    143			break;
    144		schedule();
    145	}
    146
    147	sbitmap_finish_wait(sbq, ws, &wait);
    148	return tag;
    149}
    150
    151/*
    152 * May be called from software interrupt (timer) context for allocating
    153 * iSCSI NopINs.
    154 */
    155struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
    156{
    157	struct iscsit_cmd *cmd;
    158	struct se_session *se_sess = conn->sess->se_sess;
    159	int size, tag, cpu;
    160
    161	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
    162	if (tag < 0)
    163		tag = iscsit_wait_for_tag(se_sess, state, &cpu);
    164	if (tag < 0)
    165		return NULL;
    166
    167	size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
    168	cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
    169	memset(cmd, 0, size);
    170
    171	cmd->se_cmd.map_tag = tag;
    172	cmd->se_cmd.map_cpu = cpu;
    173	cmd->conn = conn;
    174	cmd->data_direction = DMA_NONE;
    175	INIT_LIST_HEAD(&cmd->i_conn_node);
    176	INIT_LIST_HEAD(&cmd->datain_list);
    177	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
    178	spin_lock_init(&cmd->datain_lock);
    179	spin_lock_init(&cmd->dataout_timeout_lock);
    180	spin_lock_init(&cmd->istate_lock);
    181	spin_lock_init(&cmd->error_lock);
    182	spin_lock_init(&cmd->r2t_lock);
    183	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
    184
    185	return cmd;
    186}
    187EXPORT_SYMBOL(iscsit_allocate_cmd);
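
The tag returned by sbitmap_queue_get() above indexes a preallocated slot in se_sess->sess_cmd_map, and each slot is sized sizeof(struct iscsit_cmd) plus the transport's priv_size, so a transport driver's private state sits directly behind the command it belongs to. Below is a minimal standalone sketch of that slot arithmetic as ordinary userspace C; the demo_* types, queue depth, and main() are invented for the illustration and are not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_cmd { unsigned int itt; };      /* stand-in for struct iscsit_cmd */
struct demo_priv { int transport_state; };  /* stand-in for a transport's private data */

int main(void)
{
	size_t priv_size = sizeof(struct demo_priv);
	size_t slot_size = sizeof(struct demo_cmd) + priv_size;
	int queue_depth = 4;
	char *cmd_map = calloc(queue_depth, slot_size);  /* plays the role of sess_cmd_map */

	if (!cmd_map)
		return 1;

	for (int tag = 0; tag < queue_depth; tag++) {
		struct demo_cmd *cmd = (struct demo_cmd *)(cmd_map + tag * slot_size);
		void *priv = cmd + 1;            /* private area sits right after the command */

		memset(cmd, 0, slot_size);       /* mirrors the memset() in iscsit_allocate_cmd() */
		cmd->itt = tag;
		printf("tag %d -> cmd %p, priv %p\n", tag, (void *)cmd, priv);
	}
	free(cmd_map);
	return 0;
}
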
    188
    189struct iscsi_seq *iscsit_get_seq_holder_for_datain(
    190	struct iscsit_cmd *cmd,
    191	u32 seq_send_order)
    192{
    193	u32 i;
    194
    195	for (i = 0; i < cmd->seq_count; i++)
    196		if (cmd->seq_list[i].seq_send_order == seq_send_order)
    197			return &cmd->seq_list[i];
    198
    199	return NULL;
    200}
    201
    202struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
    203{
    204	u32 i;
    205
    206	if (!cmd->seq_list) {
    207		pr_err("struct iscsit_cmd->seq_list is NULL!\n");
    208		return NULL;
    209	}
    210
    211	for (i = 0; i < cmd->seq_count; i++) {
    212		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
    213			continue;
    214		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
    215			cmd->seq_send_order++;
    216			return &cmd->seq_list[i];
    217		}
    218	}
    219
    220	return NULL;
    221}
    222
    223struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
    224	struct iscsit_cmd *cmd,
    225	u32 r2t_sn)
    226{
    227	struct iscsi_r2t *r2t;
    228
    229	spin_lock_bh(&cmd->r2t_lock);
    230	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
    231		if (r2t->r2t_sn == r2t_sn) {
    232			spin_unlock_bh(&cmd->r2t_lock);
    233			return r2t;
    234		}
    235	}
    236	spin_unlock_bh(&cmd->r2t_lock);
    237
    238	return NULL;
    239}
    240
    241static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
    242{
    243	u32 max_cmdsn;
    244	int ret;
    245
    246	/*
    247	 * This is the proper method of checking received CmdSN against
    248	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
     249 * of order CmdSNs due to multiple connection sessions and/or
    250	 * CRC failures.
    251	 */
    252	max_cmdsn = atomic_read(&sess->max_cmd_sn);
    253	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
    254		pr_err("Received CmdSN: 0x%08x is greater than"
    255		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
    256		ret = CMDSN_MAXCMDSN_OVERRUN;
    257
    258	} else if (cmdsn == sess->exp_cmd_sn) {
    259		sess->exp_cmd_sn++;
    260		pr_debug("Received CmdSN matches ExpCmdSN,"
    261		      " incremented ExpCmdSN to: 0x%08x\n",
    262		      sess->exp_cmd_sn);
    263		ret = CMDSN_NORMAL_OPERATION;
    264
    265	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
    266		pr_debug("Received CmdSN: 0x%08x is greater"
    267		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
    268		      cmdsn, sess->exp_cmd_sn);
    269		ret = CMDSN_HIGHER_THAN_EXP;
    270
    271	} else {
    272		pr_err("Received CmdSN: 0x%08x is less than"
    273		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
    274		       sess->exp_cmd_sn);
    275		ret = CMDSN_LOWER_THAN_EXP;
    276	}
    277
    278	return ret;
    279}
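
iscsi_sna_gt(), pulled in through the shared iSCSI headers included at the top of this file, compares CmdSN values with RFC 1982 style serial number arithmetic, which is what keeps the ExpCmdSN/MaxCmdSN window working when CmdSN wraps from 0xffffffff back to 0. The standalone userspace sketch below illustrates that comparison and how the classification above falls out around a wrap; sna32_gt() is a local stand-in written for this example, not the kernel helper itself.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the serial-number comparison: "n1 is ahead of n2"
 * when the signed 32-bit distance between them is positive. */
static int sna32_gt(uint32_t n1, uint32_t n2)
{
	return (int32_t)(n1 - n2) > 0;
}

int main(void)
{
	uint32_t exp_cmd_sn = 0xfffffffeu;  /* ExpCmdSN just before the wrap */
	uint32_t max_cmd_sn = 0x00000010u;  /* MaxCmdSN already wrapped past 0 */
	uint32_t cmdsn      = 0x00000002u;  /* received CmdSN after the wrap */

	/* 0: still inside the window, so not CMDSN_MAXCMDSN_OVERRUN */
	printf("cmdsn beyond MaxCmdSN? %d\n", sna32_gt(cmdsn, max_cmd_sn));
	/* 1: ahead of ExpCmdSN, so it would be classified CMDSN_HIGHER_THAN_EXP */
	printf("cmdsn beyond ExpCmdSN? %d\n", sna32_gt(cmdsn, exp_cmd_sn));
	return 0;
}
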
    280
    281/*
    282 * Commands may be received out of order if MC/S is in use.
    283 * Ensure they are executed in CmdSN order.
    284 */
    285int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
    286			unsigned char *buf, __be32 cmdsn)
    287{
    288	int ret, cmdsn_ret;
    289	bool reject = false;
    290	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
    291
    292	mutex_lock(&conn->sess->cmdsn_mutex);
    293
    294	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
    295	switch (cmdsn_ret) {
    296	case CMDSN_NORMAL_OPERATION:
    297		ret = iscsit_execute_cmd(cmd, 0);
    298		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
    299			iscsit_execute_ooo_cmdsns(conn->sess);
    300		else if (ret < 0) {
    301			reject = true;
    302			ret = CMDSN_ERROR_CANNOT_RECOVER;
    303		}
    304		break;
    305	case CMDSN_HIGHER_THAN_EXP:
    306		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
    307		if (ret < 0) {
    308			reject = true;
    309			ret = CMDSN_ERROR_CANNOT_RECOVER;
    310			break;
    311		}
    312		ret = CMDSN_HIGHER_THAN_EXP;
    313		break;
    314	case CMDSN_LOWER_THAN_EXP:
    315	case CMDSN_MAXCMDSN_OVERRUN:
    316	default:
    317		cmd->i_state = ISTATE_REMOVE;
    318		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
    319		/*
    320		 * Existing callers for iscsit_sequence_cmd() will silently
    321		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
    322		 * return for CMDSN_MAXCMDSN_OVERRUN as well..
    323		 */
    324		ret = CMDSN_LOWER_THAN_EXP;
    325		break;
    326	}
    327	mutex_unlock(&conn->sess->cmdsn_mutex);
    328
    329	if (reject)
    330		iscsit_reject_cmd(cmd, reason, buf);
    331
    332	return ret;
    333}
    334EXPORT_SYMBOL(iscsit_sequence_cmd);
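
The return codes above form a small contract with the PDU handlers that call iscsit_sequence_cmd(). The sketch below is a hypothetical caller, not code from the driver: example_handle_pdu() and its signature are invented, and the comments summarize the behaviour of the function above.

/*
 * Hypothetical caller sketch, not part of the driver.
 */
static int example_handle_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			      unsigned char *buf, __be32 cmdsn)
{
	int ret = iscsit_sequence_cmd(conn, cmd, buf, cmdsn);

	if (ret == CMDSN_ERROR_CANNOT_RECOVER)
		return -1;	/* command was already rejected via iscsit_reject_cmd() */

	/*
	 * CMDSN_NORMAL_OPERATION: the command was executed immediately.
	 * CMDSN_HIGHER_THAN_EXP:  held back until the CmdSN gap is filled.
	 * CMDSN_LOWER_THAN_EXP:   stale or overrun CmdSN, silently dropped.
	 */
	return 0;
}
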
    335
    336int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
    337{
    338	struct iscsit_conn *conn = cmd->conn;
    339	struct se_cmd *se_cmd = &cmd->se_cmd;
    340	struct iscsi_data *hdr = (struct iscsi_data *) buf;
    341	u32 payload_length = ntoh24(hdr->dlength);
    342
    343	if (conn->sess->sess_ops->InitialR2T) {
    344		pr_err("Received unexpected unsolicited data"
    345			" while InitialR2T=Yes, protocol error.\n");
    346		transport_send_check_condition_and_sense(se_cmd,
    347				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
    348		return -1;
    349	}
    350
    351	if ((cmd->first_burst_len + payload_length) >
    352	     conn->sess->sess_ops->FirstBurstLength) {
    353		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
    354			" for this Unsolicited DataOut Burst.\n",
    355			(cmd->first_burst_len + payload_length),
    356				conn->sess->sess_ops->FirstBurstLength);
    357		transport_send_check_condition_and_sense(se_cmd,
    358				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
    359		return -1;
    360	}
    361
    362	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
    363		return 0;
    364
    365	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
    366	    ((cmd->first_burst_len + payload_length) !=
    367	      conn->sess->sess_ops->FirstBurstLength)) {
    368		pr_err("Unsolicited non-immediate data received %u"
    369			" does not equal FirstBurstLength: %u, and does"
    370			" not equal ExpXferLen %u.\n",
    371			(cmd->first_burst_len + payload_length),
    372			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
    373		transport_send_check_condition_and_sense(se_cmd,
    374				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
    375		return -1;
    376	}
    377	return 0;
    378}
    379
    380struct iscsit_cmd *iscsit_find_cmd_from_itt(
    381	struct iscsit_conn *conn,
    382	itt_t init_task_tag)
    383{
    384	struct iscsit_cmd *cmd;
    385
    386	spin_lock_bh(&conn->cmd_lock);
    387	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
    388		if (cmd->init_task_tag == init_task_tag) {
    389			spin_unlock_bh(&conn->cmd_lock);
    390			return cmd;
    391		}
    392	}
    393	spin_unlock_bh(&conn->cmd_lock);
    394
    395	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
    396			init_task_tag, conn->cid);
    397	return NULL;
    398}
    399EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
    400
    401struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
    402	struct iscsit_conn *conn,
    403	itt_t init_task_tag,
    404	u32 length)
    405{
    406	struct iscsit_cmd *cmd;
    407
    408	spin_lock_bh(&conn->cmd_lock);
    409	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
    410		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
    411			continue;
    412		if (cmd->init_task_tag == init_task_tag) {
    413			spin_unlock_bh(&conn->cmd_lock);
    414			return cmd;
    415		}
    416	}
    417	spin_unlock_bh(&conn->cmd_lock);
    418
    419	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
    420			" dumping payload\n", init_task_tag, conn->cid);
    421	if (length)
    422		iscsit_dump_data_payload(conn, length, 1);
    423
    424	return NULL;
    425}
    426EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
    427
    428struct iscsit_cmd *iscsit_find_cmd_from_ttt(
    429	struct iscsit_conn *conn,
    430	u32 targ_xfer_tag)
    431{
    432	struct iscsit_cmd *cmd = NULL;
    433
    434	spin_lock_bh(&conn->cmd_lock);
    435	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
    436		if (cmd->targ_xfer_tag == targ_xfer_tag) {
    437			spin_unlock_bh(&conn->cmd_lock);
    438			return cmd;
    439		}
    440	}
    441	spin_unlock_bh(&conn->cmd_lock);
    442
    443	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
    444			targ_xfer_tag, conn->cid);
    445	return NULL;
    446}
    447
    448int iscsit_find_cmd_for_recovery(
    449	struct iscsit_session *sess,
    450	struct iscsit_cmd **cmd_ptr,
    451	struct iscsi_conn_recovery **cr_ptr,
    452	itt_t init_task_tag)
    453{
    454	struct iscsit_cmd *cmd = NULL;
    455	struct iscsi_conn_recovery *cr;
    456	/*
    457	 * Scan through the inactive connection recovery list's command list.
     458	 * If init_task_tag matches, the command still sits on an inactive connection recovery entry.
    459	 */
    460	spin_lock(&sess->cr_i_lock);
    461	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
    462		spin_lock(&cr->conn_recovery_cmd_lock);
    463		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
    464			if (cmd->init_task_tag == init_task_tag) {
    465				spin_unlock(&cr->conn_recovery_cmd_lock);
    466				spin_unlock(&sess->cr_i_lock);
    467
    468				*cr_ptr = cr;
    469				*cmd_ptr = cmd;
    470				return -2;
    471			}
    472		}
    473		spin_unlock(&cr->conn_recovery_cmd_lock);
    474	}
    475	spin_unlock(&sess->cr_i_lock);
    476	/*
    477	 * Scan through the active connection recovery list's command list.
     478	 * If init_task_tag matches, the command is ready to be reassigned.
    479	 */
    480	spin_lock(&sess->cr_a_lock);
    481	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
    482		spin_lock(&cr->conn_recovery_cmd_lock);
    483		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
    484			if (cmd->init_task_tag == init_task_tag) {
    485				spin_unlock(&cr->conn_recovery_cmd_lock);
    486				spin_unlock(&sess->cr_a_lock);
    487
    488				*cr_ptr = cr;
    489				*cmd_ptr = cmd;
    490				return 0;
    491			}
    492		}
    493		spin_unlock(&cr->conn_recovery_cmd_lock);
    494	}
    495	spin_unlock(&sess->cr_a_lock);
    496
    497	return -1;
    498}
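
iscsit_find_cmd_for_recovery() distinguishes three outcomes by its return value. The following is a hypothetical caller sketch; example_check_recovery() is invented for illustration and simply maps each return value back to the comments in the function above.

/*
 * Hypothetical caller sketch, not part of the driver.
 */
static void example_check_recovery(struct iscsit_session *sess, itt_t itt)
{
	struct iscsit_cmd *cmd;
	struct iscsi_conn_recovery *cr;

	switch (iscsit_find_cmd_for_recovery(sess, &cmd, &cr, itt)) {
	case 0:
		/* found on an active recovery entry: ready to be reassigned */
		break;
	case -2:
		/* found, but its recovery entry is still on the inactive list */
		break;
	default:
		/* -1: no command with this ITT is awaiting connection recovery */
		break;
	}
}
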
    499
    500void iscsit_add_cmd_to_immediate_queue(
    501	struct iscsit_cmd *cmd,
    502	struct iscsit_conn *conn,
    503	u8 state)
    504{
    505	struct iscsi_queue_req *qr;
    506
    507	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
    508	if (!qr) {
    509		pr_err("Unable to allocate memory for"
    510				" struct iscsi_queue_req\n");
    511		return;
    512	}
    513	INIT_LIST_HEAD(&qr->qr_list);
    514	qr->cmd = cmd;
    515	qr->state = state;
    516
    517	spin_lock_bh(&conn->immed_queue_lock);
    518	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
    519	atomic_inc(&cmd->immed_queue_count);
    520	atomic_set(&conn->check_immediate_queue, 1);
    521	spin_unlock_bh(&conn->immed_queue_lock);
    522
    523	wake_up(&conn->queues_wq);
    524}
    525EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
    526
    527struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
    528{
    529	struct iscsi_queue_req *qr;
    530
    531	spin_lock_bh(&conn->immed_queue_lock);
    532	if (list_empty(&conn->immed_queue_list)) {
    533		spin_unlock_bh(&conn->immed_queue_lock);
    534		return NULL;
    535	}
    536	qr = list_first_entry(&conn->immed_queue_list,
    537			      struct iscsi_queue_req, qr_list);
    538
    539	list_del(&qr->qr_list);
    540	if (qr->cmd)
    541		atomic_dec(&qr->cmd->immed_queue_count);
    542	spin_unlock_bh(&conn->immed_queue_lock);
    543
    544	return qr;
    545}
    546
    547static void iscsit_remove_cmd_from_immediate_queue(
    548	struct iscsit_cmd *cmd,
    549	struct iscsit_conn *conn)
    550{
    551	struct iscsi_queue_req *qr, *qr_tmp;
    552
    553	spin_lock_bh(&conn->immed_queue_lock);
    554	if (!atomic_read(&cmd->immed_queue_count)) {
    555		spin_unlock_bh(&conn->immed_queue_lock);
    556		return;
    557	}
    558
    559	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
    560		if (qr->cmd != cmd)
    561			continue;
    562
    563		atomic_dec(&qr->cmd->immed_queue_count);
    564		list_del(&qr->qr_list);
    565		kmem_cache_free(lio_qr_cache, qr);
    566	}
    567	spin_unlock_bh(&conn->immed_queue_lock);
    568
    569	if (atomic_read(&cmd->immed_queue_count)) {
    570		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
    571			cmd->init_task_tag,
    572			atomic_read(&cmd->immed_queue_count));
    573	}
    574}
    575
    576int iscsit_add_cmd_to_response_queue(
    577	struct iscsit_cmd *cmd,
    578	struct iscsit_conn *conn,
    579	u8 state)
    580{
    581	struct iscsi_queue_req *qr;
    582
    583	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
    584	if (!qr) {
    585		pr_err("Unable to allocate memory for"
    586			" struct iscsi_queue_req\n");
    587		return -ENOMEM;
    588	}
    589	INIT_LIST_HEAD(&qr->qr_list);
    590	qr->cmd = cmd;
    591	qr->state = state;
    592
    593	spin_lock_bh(&conn->response_queue_lock);
    594	list_add_tail(&qr->qr_list, &conn->response_queue_list);
    595	atomic_inc(&cmd->response_queue_count);
    596	spin_unlock_bh(&conn->response_queue_lock);
    597
    598	wake_up(&conn->queues_wq);
    599	return 0;
    600}
    601
    602struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
    603{
    604	struct iscsi_queue_req *qr;
    605
    606	spin_lock_bh(&conn->response_queue_lock);
    607	if (list_empty(&conn->response_queue_list)) {
    608		spin_unlock_bh(&conn->response_queue_lock);
    609		return NULL;
    610	}
    611
    612	qr = list_first_entry(&conn->response_queue_list,
    613			      struct iscsi_queue_req, qr_list);
    614
    615	list_del(&qr->qr_list);
    616	if (qr->cmd)
    617		atomic_dec(&qr->cmd->response_queue_count);
    618	spin_unlock_bh(&conn->response_queue_lock);
    619
    620	return qr;
    621}
    622
    623static void iscsit_remove_cmd_from_response_queue(
    624	struct iscsit_cmd *cmd,
    625	struct iscsit_conn *conn)
    626{
    627	struct iscsi_queue_req *qr, *qr_tmp;
    628
    629	spin_lock_bh(&conn->response_queue_lock);
    630	if (!atomic_read(&cmd->response_queue_count)) {
    631		spin_unlock_bh(&conn->response_queue_lock);
    632		return;
    633	}
    634
    635	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
    636				qr_list) {
    637		if (qr->cmd != cmd)
    638			continue;
    639
    640		atomic_dec(&qr->cmd->response_queue_count);
    641		list_del(&qr->qr_list);
    642		kmem_cache_free(lio_qr_cache, qr);
    643	}
    644	spin_unlock_bh(&conn->response_queue_lock);
    645
    646	if (atomic_read(&cmd->response_queue_count)) {
    647		pr_err("ITT: 0x%08x response_queue_count: %d\n",
    648			cmd->init_task_tag,
    649			atomic_read(&cmd->response_queue_count));
    650	}
    651}
    652
    653bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
    654{
    655	bool empty;
    656
    657	spin_lock_bh(&conn->immed_queue_lock);
    658	empty = list_empty(&conn->immed_queue_list);
    659	spin_unlock_bh(&conn->immed_queue_lock);
    660
    661	if (!empty)
    662		return empty;
    663
    664	spin_lock_bh(&conn->response_queue_lock);
    665	empty = list_empty(&conn->response_queue_list);
    666	spin_unlock_bh(&conn->response_queue_lock);
    667
    668	return empty;
    669}
    670
    671void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
    672{
    673	struct iscsi_queue_req *qr, *qr_tmp;
    674
    675	spin_lock_bh(&conn->immed_queue_lock);
    676	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
    677		list_del(&qr->qr_list);
    678		if (qr->cmd)
    679			atomic_dec(&qr->cmd->immed_queue_count);
    680
    681		kmem_cache_free(lio_qr_cache, qr);
    682	}
    683	spin_unlock_bh(&conn->immed_queue_lock);
    684
    685	spin_lock_bh(&conn->response_queue_lock);
    686	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
    687			qr_list) {
    688		list_del(&qr->qr_list);
    689		if (qr->cmd)
    690			atomic_dec(&qr->cmd->response_queue_count);
    691
    692		kmem_cache_free(lio_qr_cache, qr);
    693	}
    694	spin_unlock_bh(&conn->response_queue_lock);
    695}
    696
    697void iscsit_release_cmd(struct iscsit_cmd *cmd)
    698{
    699	struct iscsit_session *sess;
    700	struct se_cmd *se_cmd = &cmd->se_cmd;
    701
    702	WARN_ON(!list_empty(&cmd->i_conn_node));
    703
    704	if (cmd->conn)
    705		sess = cmd->conn->sess;
    706	else
    707		sess = cmd->sess;
    708
    709	BUG_ON(!sess || !sess->se_sess);
    710
    711	kfree(cmd->buf_ptr);
    712	kfree(cmd->pdu_list);
    713	kfree(cmd->seq_list);
    714	kfree(cmd->tmr_req);
    715	kfree(cmd->overflow_buf);
    716	kfree(cmd->iov_data);
    717	kfree(cmd->text_in_ptr);
    718
    719	target_free_tag(sess->se_sess, se_cmd);
    720}
    721EXPORT_SYMBOL(iscsit_release_cmd);
    722
    723void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
    724{
    725	struct iscsit_conn *conn = cmd->conn;
    726
    727	WARN_ON(!list_empty(&cmd->i_conn_node));
    728
    729	if (cmd->data_direction == DMA_TO_DEVICE) {
    730		iscsit_stop_dataout_timer(cmd);
    731		iscsit_free_r2ts_from_list(cmd);
    732	}
    733	if (cmd->data_direction == DMA_FROM_DEVICE)
    734		iscsit_free_all_datain_reqs(cmd);
    735
    736	if (conn && check_queues) {
    737		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
    738		iscsit_remove_cmd_from_response_queue(cmd, conn);
    739	}
    740
    741	if (conn && conn->conn_transport->iscsit_unmap_cmd)
    742		conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
    743}
    744
    745void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
    746{
    747	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
    748	int rc;
    749
    750	WARN_ON(!list_empty(&cmd->i_conn_node));
    751
    752	__iscsit_free_cmd(cmd, shutdown);
    753	if (se_cmd) {
    754		rc = transport_generic_free_cmd(se_cmd, shutdown);
    755		if (!rc && shutdown && se_cmd->se_sess) {
    756			__iscsit_free_cmd(cmd, shutdown);
    757			target_put_sess_cmd(se_cmd);
    758		}
    759	} else {
    760		iscsit_release_cmd(cmd);
    761	}
    762}
    763EXPORT_SYMBOL(iscsit_free_cmd);
    764
    765bool iscsit_check_session_usage_count(struct iscsit_session *sess,
    766				      bool can_sleep)
    767{
    768	spin_lock_bh(&sess->session_usage_lock);
    769	if (sess->session_usage_count != 0) {
    770		sess->session_waiting_on_uc = 1;
    771		spin_unlock_bh(&sess->session_usage_lock);
    772		if (!can_sleep)
    773			return true;
    774
    775		wait_for_completion(&sess->session_waiting_on_uc_comp);
    776		return false;
    777	}
    778	spin_unlock_bh(&sess->session_usage_lock);
    779
    780	return false;
    781}
    782
    783void iscsit_dec_session_usage_count(struct iscsit_session *sess)
    784{
    785	spin_lock_bh(&sess->session_usage_lock);
    786	sess->session_usage_count--;
    787
    788	if (!sess->session_usage_count && sess->session_waiting_on_uc)
    789		complete(&sess->session_waiting_on_uc_comp);
    790
    791	spin_unlock_bh(&sess->session_usage_lock);
    792}
    793
    794void iscsit_inc_session_usage_count(struct iscsit_session *sess)
    795{
    796	spin_lock_bh(&sess->session_usage_lock);
    797	sess->session_usage_count++;
    798	spin_unlock_bh(&sess->session_usage_lock);
    799}
    800
    801struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
    802{
    803	struct iscsit_conn *conn;
    804
    805	spin_lock_bh(&sess->conn_lock);
    806	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
    807		if ((conn->cid == cid) &&
    808		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
    809			iscsit_inc_conn_usage_count(conn);
    810			spin_unlock_bh(&sess->conn_lock);
    811			return conn;
    812		}
    813	}
    814	spin_unlock_bh(&sess->conn_lock);
    815
    816	return NULL;
    817}
    818
    819struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
    820{
    821	struct iscsit_conn *conn;
    822
    823	spin_lock_bh(&sess->conn_lock);
    824	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
    825		if (conn->cid == cid) {
    826			iscsit_inc_conn_usage_count(conn);
    827			spin_lock(&conn->state_lock);
    828			atomic_set(&conn->connection_wait_rcfr, 1);
    829			spin_unlock(&conn->state_lock);
    830			spin_unlock_bh(&sess->conn_lock);
    831			return conn;
    832		}
    833	}
    834	spin_unlock_bh(&sess->conn_lock);
    835
    836	return NULL;
    837}
    838
    839void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
    840{
    841	spin_lock_bh(&conn->conn_usage_lock);
    842	if (conn->conn_usage_count != 0) {
    843		conn->conn_waiting_on_uc = 1;
    844		spin_unlock_bh(&conn->conn_usage_lock);
    845
    846		wait_for_completion(&conn->conn_waiting_on_uc_comp);
    847		return;
    848	}
    849	spin_unlock_bh(&conn->conn_usage_lock);
    850}
    851
    852void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
    853{
    854	spin_lock_bh(&conn->conn_usage_lock);
    855	conn->conn_usage_count--;
    856
    857	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
    858		complete(&conn->conn_waiting_on_uc_comp);
    859
    860	spin_unlock_bh(&conn->conn_usage_lock);
    861}
    862
    863void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
    864{
    865	spin_lock_bh(&conn->conn_usage_lock);
    866	conn->conn_usage_count++;
    867	spin_unlock_bh(&conn->conn_usage_lock);
    868}
    869
    870static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
    871{
    872	u8 state;
    873	struct iscsit_cmd *cmd;
    874
    875	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
    876	if (!cmd)
    877		return -1;
    878
    879	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
    880	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
    881				ISTATE_SEND_NOPIN_NO_RESPONSE;
    882	cmd->init_task_tag = RESERVED_ITT;
    883	cmd->targ_xfer_tag = (want_response) ?
    884			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
    885	spin_lock_bh(&conn->cmd_lock);
    886	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
    887	spin_unlock_bh(&conn->cmd_lock);
    888
    889	if (want_response)
    890		iscsit_start_nopin_response_timer(conn);
    891	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
    892
    893	return 0;
    894}
    895
    896void iscsit_handle_nopin_response_timeout(struct timer_list *t)
    897{
    898	struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
    899	struct iscsit_session *sess = conn->sess;
    900
    901	iscsit_inc_conn_usage_count(conn);
    902
    903	spin_lock_bh(&conn->nopin_timer_lock);
    904	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
    905		spin_unlock_bh(&conn->nopin_timer_lock);
    906		iscsit_dec_conn_usage_count(conn);
    907		return;
    908	}
    909
    910	pr_err("Did not receive response to NOPIN on CID: %hu, failing"
    911		" connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
    912		conn->cid, sess->sess_ops->InitiatorName, sess->isid,
    913		sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
    914	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
    915	spin_unlock_bh(&conn->nopin_timer_lock);
    916
    917	iscsit_fill_cxn_timeout_err_stats(sess);
    918	iscsit_cause_connection_reinstatement(conn, 0);
    919	iscsit_dec_conn_usage_count(conn);
    920}
    921
    922void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
    923{
    924	struct iscsit_session *sess = conn->sess;
    925	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
    926
    927	spin_lock_bh(&conn->nopin_timer_lock);
    928	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
    929		spin_unlock_bh(&conn->nopin_timer_lock);
    930		return;
    931	}
    932
    933	mod_timer(&conn->nopin_response_timer,
    934		(get_jiffies_64() + na->nopin_response_timeout * HZ));
    935	spin_unlock_bh(&conn->nopin_timer_lock);
    936}
    937
    938void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
    939{
    940	struct iscsit_session *sess = conn->sess;
    941	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
    942
    943	spin_lock_bh(&conn->nopin_timer_lock);
    944	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
    945		spin_unlock_bh(&conn->nopin_timer_lock);
    946		return;
    947	}
    948
    949	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
    950	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
    951	mod_timer(&conn->nopin_response_timer,
    952		  jiffies + na->nopin_response_timeout * HZ);
    953
    954	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
    955		" seconds\n", conn->cid, na->nopin_response_timeout);
    956	spin_unlock_bh(&conn->nopin_timer_lock);
    957}
    958
    959void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
    960{
    961	spin_lock_bh(&conn->nopin_timer_lock);
    962	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
    963		spin_unlock_bh(&conn->nopin_timer_lock);
    964		return;
    965	}
    966	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
    967	spin_unlock_bh(&conn->nopin_timer_lock);
    968
    969	del_timer_sync(&conn->nopin_response_timer);
    970
    971	spin_lock_bh(&conn->nopin_timer_lock);
    972	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
    973	spin_unlock_bh(&conn->nopin_timer_lock);
    974}
    975
    976void iscsit_handle_nopin_timeout(struct timer_list *t)
    977{
    978	struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);
    979
    980	iscsit_inc_conn_usage_count(conn);
    981
    982	spin_lock_bh(&conn->nopin_timer_lock);
    983	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
    984		spin_unlock_bh(&conn->nopin_timer_lock);
    985		iscsit_dec_conn_usage_count(conn);
    986		return;
    987	}
    988	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
    989	spin_unlock_bh(&conn->nopin_timer_lock);
    990
    991	iscsit_add_nopin(conn, 1);
    992	iscsit_dec_conn_usage_count(conn);
    993}
    994
    995void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
    996{
    997	struct iscsit_session *sess = conn->sess;
    998	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
    999
   1000	lockdep_assert_held(&conn->nopin_timer_lock);
   1001
   1002	/*
    1003	 * NOPIN timeout is disabled.
   1004	 */
   1005	if (!na->nopin_timeout)
   1006		return;
   1007
   1008	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
   1009		return;
   1010
   1011	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
   1012	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
   1013	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
   1014
   1015	pr_debug("Started NOPIN Timer on CID: %d at %u second"
   1016		" interval\n", conn->cid, na->nopin_timeout);
   1017}
   1018
   1019void iscsit_start_nopin_timer(struct iscsit_conn *conn)
   1020{
   1021	spin_lock_bh(&conn->nopin_timer_lock);
   1022	__iscsit_start_nopin_timer(conn);
   1023	spin_unlock_bh(&conn->nopin_timer_lock);
   1024}
   1025
   1026void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
   1027{
   1028	spin_lock_bh(&conn->nopin_timer_lock);
   1029	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
   1030		spin_unlock_bh(&conn->nopin_timer_lock);
   1031		return;
   1032	}
   1033	conn->nopin_timer_flags |= ISCSI_TF_STOP;
   1034	spin_unlock_bh(&conn->nopin_timer_lock);
   1035
   1036	del_timer_sync(&conn->nopin_timer);
   1037
   1038	spin_lock_bh(&conn->nopin_timer_lock);
   1039	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
   1040	spin_unlock_bh(&conn->nopin_timer_lock);
   1041}
   1042
   1043int iscsit_send_tx_data(
   1044	struct iscsit_cmd *cmd,
   1045	struct iscsit_conn *conn,
   1046	int use_misc)
   1047{
   1048	int tx_sent, tx_size;
   1049	u32 iov_count;
   1050	struct kvec *iov;
   1051
   1052send_data:
   1053	tx_size = cmd->tx_size;
   1054
   1055	if (!use_misc) {
   1056		iov = &cmd->iov_data[0];
   1057		iov_count = cmd->iov_data_count;
   1058	} else {
   1059		iov = &cmd->iov_misc[0];
   1060		iov_count = cmd->iov_misc_count;
   1061	}
   1062
   1063	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
   1064	if (tx_size != tx_sent) {
   1065		if (tx_sent == -EAGAIN) {
   1066			pr_err("tx_data() returned -EAGAIN\n");
   1067			goto send_data;
   1068		} else
   1069			return -1;
   1070	}
   1071	cmd->tx_size = 0;
   1072
   1073	return 0;
   1074}
   1075
   1076int iscsit_fe_sendpage_sg(
   1077	struct iscsit_cmd *cmd,
   1078	struct iscsit_conn *conn)
   1079{
   1080	struct scatterlist *sg = cmd->first_data_sg;
   1081	struct kvec iov;
   1082	u32 tx_hdr_size, data_len;
   1083	u32 offset = cmd->first_data_sg_off;
   1084	int tx_sent, iov_off;
   1085
   1086send_hdr:
   1087	tx_hdr_size = ISCSI_HDR_LEN;
   1088	if (conn->conn_ops->HeaderDigest)
   1089		tx_hdr_size += ISCSI_CRC_LEN;
   1090
   1091	iov.iov_base = cmd->pdu;
   1092	iov.iov_len = tx_hdr_size;
   1093
   1094	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
   1095	if (tx_hdr_size != tx_sent) {
   1096		if (tx_sent == -EAGAIN) {
   1097			pr_err("tx_data() returned -EAGAIN\n");
   1098			goto send_hdr;
   1099		}
   1100		return -1;
   1101	}
   1102
   1103	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
   1104	/*
   1105	 * Set iov_off used by padding and data digest tx_data() calls below
   1106	 * in order to determine proper offset into cmd->iov_data[]
   1107	 */
   1108	if (conn->conn_ops->DataDigest) {
   1109		data_len -= ISCSI_CRC_LEN;
   1110		if (cmd->padding)
   1111			iov_off = (cmd->iov_data_count - 2);
   1112		else
   1113			iov_off = (cmd->iov_data_count - 1);
   1114	} else {
   1115		iov_off = (cmd->iov_data_count - 1);
   1116	}
   1117	/*
   1118	 * Perform sendpage() for each page in the scatterlist
   1119	 */
   1120	while (data_len) {
   1121		u32 space = (sg->length - offset);
   1122		u32 sub_len = min_t(u32, data_len, space);
   1123send_pg:
   1124		tx_sent = conn->sock->ops->sendpage(conn->sock,
   1125					sg_page(sg), sg->offset + offset, sub_len, 0);
   1126		if (tx_sent != sub_len) {
   1127			if (tx_sent == -EAGAIN) {
   1128				pr_err("tcp_sendpage() returned"
   1129						" -EAGAIN\n");
   1130				goto send_pg;
   1131			}
   1132
   1133			pr_err("tcp_sendpage() failure: %d\n",
   1134					tx_sent);
   1135			return -1;
   1136		}
   1137
   1138		data_len -= sub_len;
   1139		offset = 0;
   1140		sg = sg_next(sg);
   1141	}
   1142
   1143send_padding:
   1144	if (cmd->padding) {
   1145		struct kvec *iov_p = &cmd->iov_data[iov_off++];
   1146
   1147		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
   1148		if (cmd->padding != tx_sent) {
   1149			if (tx_sent == -EAGAIN) {
   1150				pr_err("tx_data() returned -EAGAIN\n");
   1151				goto send_padding;
   1152			}
   1153			return -1;
   1154		}
   1155	}
   1156
   1157send_datacrc:
   1158	if (conn->conn_ops->DataDigest) {
   1159		struct kvec *iov_d = &cmd->iov_data[iov_off];
   1160
   1161		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
   1162		if (ISCSI_CRC_LEN != tx_sent) {
   1163			if (tx_sent == -EAGAIN) {
   1164				pr_err("tx_data() returned -EAGAIN\n");
   1165				goto send_datacrc;
   1166			}
   1167			return -1;
   1168		}
   1169	}
   1170
   1171	return 0;
   1172}
   1173
   1174/*
    1175 *      This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
    1176 *      back to the Initiator when an exception condition occurs, with the
    1177 *      errors set in status_class and status_detail.
   1178 *
   1179 *      Parameters:     iSCSI Connection, Status Class, Status Detail.
   1180 *      Returns:        0 on success, -1 on error.
   1181 */
   1182int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
   1183{
   1184	struct iscsi_login_rsp *hdr;
   1185	struct iscsi_login *login = conn->conn_login;
   1186
   1187	login->login_failed = 1;
   1188	iscsit_collect_login_stats(conn, status_class, status_detail);
   1189
   1190	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
   1191
   1192	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
   1193	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
   1194	hdr->status_class	= status_class;
   1195	hdr->status_detail	= status_detail;
   1196	hdr->itt		= conn->login_itt;
   1197
   1198	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
   1199}
   1200
   1201void iscsit_print_session_params(struct iscsit_session *sess)
   1202{
   1203	struct iscsit_conn *conn;
   1204
   1205	pr_debug("-----------------------------[Session Params for"
   1206		" SID: %u]-----------------------------\n", sess->sid);
   1207	spin_lock_bh(&sess->conn_lock);
   1208	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
   1209		iscsi_dump_conn_ops(conn->conn_ops);
   1210	spin_unlock_bh(&sess->conn_lock);
   1211
   1212	iscsi_dump_sess_ops(sess->sess_ops);
   1213}
   1214
   1215int rx_data(
   1216	struct iscsit_conn *conn,
   1217	struct kvec *iov,
   1218	int iov_count,
   1219	int data)
   1220{
   1221	int rx_loop = 0, total_rx = 0;
   1222	struct msghdr msg;
   1223
   1224	if (!conn || !conn->sock || !conn->conn_ops)
   1225		return -1;
   1226
   1227	memset(&msg, 0, sizeof(struct msghdr));
   1228	iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data);
   1229
   1230	while (msg_data_left(&msg)) {
   1231		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
   1232		if (rx_loop <= 0) {
   1233			pr_debug("rx_loop: %d total_rx: %d\n",
   1234				rx_loop, total_rx);
   1235			return rx_loop;
   1236		}
   1237		total_rx += rx_loop;
   1238		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
   1239				rx_loop, total_rx, data);
   1240	}
   1241
   1242	return total_rx;
   1243}
   1244
   1245int tx_data(
   1246	struct iscsit_conn *conn,
   1247	struct kvec *iov,
   1248	int iov_count,
   1249	int data)
   1250{
   1251	struct msghdr msg;
   1252	int total_tx = 0;
   1253
   1254	if (!conn || !conn->sock || !conn->conn_ops)
   1255		return -1;
   1256
   1257	if (data <= 0) {
   1258		pr_err("Data length is: %d\n", data);
   1259		return -1;
   1260	}
   1261
   1262	memset(&msg, 0, sizeof(struct msghdr));
   1263
   1264	iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data);
   1265
   1266	while (msg_data_left(&msg)) {
   1267		int tx_loop = sock_sendmsg(conn->sock, &msg);
   1268		if (tx_loop <= 0) {
   1269			pr_debug("tx_loop: %d total_tx %d\n",
   1270				tx_loop, total_tx);
   1271			return tx_loop;
   1272		}
   1273		total_tx += tx_loop;
   1274		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
   1275					tx_loop, total_tx, data);
   1276	}
   1277
   1278	return total_tx;
   1279}
   1280
   1281void iscsit_collect_login_stats(
   1282	struct iscsit_conn *conn,
   1283	u8 status_class,
   1284	u8 status_detail)
   1285{
   1286	struct iscsi_param *intrname = NULL;
   1287	struct iscsi_tiqn *tiqn;
   1288	struct iscsi_login_stats *ls;
   1289
   1290	tiqn = iscsit_snmp_get_tiqn(conn);
   1291	if (!tiqn)
   1292		return;
   1293
   1294	ls = &tiqn->login_stats;
   1295
   1296	spin_lock(&ls->lock);
   1297	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
   1298		ls->accepts++;
   1299	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
   1300		ls->redirects++;
   1301		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
   1302	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
   1303		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
   1304		ls->authenticate_fails++;
   1305		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
   1306	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
   1307		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
   1308		ls->authorize_fails++;
   1309		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
   1310	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
   1311		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
   1312		ls->negotiate_fails++;
   1313		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
   1314	} else {
   1315		ls->other_fails++;
   1316		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
   1317	}
   1318
   1319	/* Save initiator name, ip address and time, if it is a failed login */
   1320	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
   1321		if (conn->param_list)
   1322			intrname = iscsi_find_param_from_key(INITIATORNAME,
   1323							     conn->param_list);
   1324		strlcpy(ls->last_intr_fail_name,
   1325		       (intrname ? intrname->value : "Unknown"),
   1326		       sizeof(ls->last_intr_fail_name));
   1327
   1328		ls->last_intr_fail_ip_family = conn->login_family;
   1329
   1330		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
   1331		ls->last_fail_time = get_jiffies_64();
   1332	}
   1333
   1334	spin_unlock(&ls->lock);
   1335}
   1336
   1337struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
   1338{
   1339	struct iscsi_portal_group *tpg;
   1340
   1341	if (!conn)
   1342		return NULL;
   1343
   1344	tpg = conn->tpg;
   1345	if (!tpg)
   1346		return NULL;
   1347
   1348	if (!tpg->tpg_tiqn)
   1349		return NULL;
   1350
   1351	return tpg->tpg_tiqn;
   1352}
   1353
   1354void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
   1355{
   1356	struct iscsi_portal_group *tpg = sess->tpg;
   1357	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
   1358
   1359	if (!tiqn)
   1360		return;
   1361
   1362	spin_lock_bh(&tiqn->sess_err_stats.lock);
   1363	strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
   1364			sess->sess_ops->InitiatorName,
   1365			sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
   1366	tiqn->sess_err_stats.last_sess_failure_type =
   1367			ISCSI_SESS_ERR_CXN_TIMEOUT;
   1368	tiqn->sess_err_stats.cxn_timeout_errors++;
   1369	atomic_long_inc(&sess->conn_timeout_errors);
   1370	spin_unlock_bh(&tiqn->sess_err_stats.lock);
   1371}