cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cxgbit_target.c (41554B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2016 Chelsio Communications, Inc.
      4 */
      5
      6#include <linux/workqueue.h>
      7#include <linux/kthread.h>
      8#include <linux/sched/signal.h>
      9
     10#include <asm/unaligned.h>
     11#include <net/tcp.h>
     12#include <target/target_core_base.h>
     13#include <target/target_core_fabric.h>
     14#include "cxgbit.h"
     15
     16struct sge_opaque_hdr {
     17	void *dev;
     18	dma_addr_t addr[MAX_SKB_FRAGS + 1];
     19};
     20
     21static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
     22
     23#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
     24		    sizeof(struct fw_ofld_tx_data_wr))
     25
     26static struct sk_buff *
     27__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
     28{
     29	struct sk_buff *skb = NULL;
     30	u8 submode = 0;
     31	int errcode;
     32	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
     33
     34	if (len) {
     35		skb = alloc_skb_with_frags(hdr_len, len,
     36					   0, &errcode,
     37					   GFP_KERNEL);
     38		if (!skb)
     39			return NULL;
     40
     41		skb_reserve(skb, TX_HDR_LEN);
     42		skb_reset_transport_header(skb);
     43		__skb_put(skb, ISCSI_HDR_LEN);
     44		skb->data_len = len;
     45		skb->len += len;
     46		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
     47
     48	} else {
     49		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
     50
     51		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
     52		if (!skb)
     53			return NULL;
     54
     55		skb_reserve(skb, TX_HDR_LEN + iso_len);
     56		skb_reset_transport_header(skb);
     57		__skb_put(skb, ISCSI_HDR_LEN);
     58	}
     59
     60	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
     61	cxgbit_skcb_submode(skb) = submode;
     62	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
     63	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
     64	return skb;
     65}
     66
     67static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
     68{
     69	return __cxgbit_alloc_skb(csk, len, false);
     70}
     71
     72/*
     73 * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
     74 * @skb: the packet
     75 *
     76 * Returns true if a packet can be sent as an offload WR with immediate
     77 * data.  We currently use the same limit as for Ethernet packets.
     78 */
     79static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
     80{
     81	int length = skb->len;
     82
     83	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
     84		length += sizeof(struct fw_ofld_tx_data_wr);
     85
     86	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
     87		length += sizeof(struct cpl_tx_data_iso);
     88
     89	return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
     90}
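/*
 * Worked example (illustrative sizes): a 48-byte control PDU flagged
 * SKCBF_TX_NEED_HDR is checked as 48 + sizeof(struct fw_ofld_tx_data_wr)
 * against MAX_IMM_OFLD_TX_DATA_WR_LEN, so small PDUs are copied inline
 * into the work request while large payloads are sent by DMA gather.
 */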
     91
     92/*
     93 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
     94 * @n: the number of SGL entries
     95 * Calculates the number of flits needed for a scatter/gather list that
     96 * can hold the given number of entries.
     97 */
     98static inline unsigned int cxgbit_sgl_len(unsigned int n)
     99{
    100	n--;
    101	return (3 * n) / 2 + (n & 1) + 2;
    102}
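/*
 * A flit is 8 bytes: the first SGL entry takes two flits (command/len0
 * plus addr0) and the remaining entries pack three flits per pair, so
 * the formula above gives n = 1 -> 2, n = 2 -> 4, n = 3 -> 5,
 * n = 4 -> 7 and n = 8 -> 13 flits.
 */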
    103
    104/*
    105 * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
    106 * @skb: the packet
    107 *
    108 * Returns the number of flits needed for the given offload packet.
    109 * These packets are already fully constructed and no additional headers
    110 * will be added.
    111 */
    112static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
    113{
    114	unsigned int flits, cnt;
    115
    116	if (cxgbit_is_ofld_imm(skb))
    117		return DIV_ROUND_UP(skb->len, 8);
    118	flits = skb_transport_offset(skb) / 8;
    119	cnt = skb_shinfo(skb)->nr_frags;
    120	if (skb_tail_pointer(skb) != skb_transport_header(skb))
    121		cnt++;
    122	return flits + cxgbit_sgl_len(cnt);
    123}
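/*
 * Example: an immediate skb of 120 bytes costs DIV_ROUND_UP(120, 8) =
 * 15 flits. In the DMA case the headers in the linear area are counted
 * in whole flits and the SGL covers one entry per page frag, plus one
 * entry for any payload left in the linear area.
 */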
    124
    125#define CXGBIT_ISO_FSLICE 0x1
    126#define CXGBIT_ISO_LSLICE 0x2
    127static void
    128cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
    129{
    130	struct cpl_tx_data_iso *cpl;
    131	unsigned int submode = cxgbit_skcb_submode(skb);
    132	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
    133	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
    134
    135	cpl = __skb_push(skb, sizeof(*cpl));
    136
    137	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
    138			CPL_TX_DATA_ISO_FIRST_V(fslice) |
    139			CPL_TX_DATA_ISO_LAST_V(lslice) |
    140			CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
    141			CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
    142			CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
    143			CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
    144			CPL_TX_DATA_ISO_SCSI_V(2));
    145
    146	cpl->ahs_len = 0;
    147	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
    148	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
    149	cpl->len = htonl(iso_info->len);
    150	cpl->reserved2_seglen_offset = htonl(0);
    151	cpl->datasn_offset = htonl(0);
    152	cpl->buffer_offset = htonl(0);
    153	cpl->reserved3 = 0;
    154
    155	__skb_pull(skb, sizeof(*cpl));
    156}
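/*
 * Units as programmed above: cpl->mpdu and cpl->burst_size are carried
 * in 4-byte words (hence the DIV_ROUND_UP(..., 4)), while cpl->len is
 * the byte count of the first PDU header plus the burst payload.
 */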
    157
    158static void
    159cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
    160		  u32 len, u32 credits, u32 compl)
    161{
    162	struct fw_ofld_tx_data_wr *req;
    163	const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
    164	u32 submode = cxgbit_skcb_submode(skb);
    165	u32 wr_ulp_mode = 0;
    166	u32 hdr_size = sizeof(*req);
    167	u32 opcode = FW_OFLD_TX_DATA_WR;
    168	u32 immlen = 0;
    169	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
    170		    T6_TX_FORCE_F;
    171
    172	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
    173		opcode = FW_ISCSI_TX_DATA_WR;
    174		immlen += sizeof(struct cpl_tx_data_iso);
    175		hdr_size += sizeof(struct cpl_tx_data_iso);
    176		submode |= 8;
    177	}
    178
    179	if (cxgbit_is_ofld_imm(skb))
    180		immlen += dlen;
    181
    182	req = __skb_push(skb, hdr_size);
    183	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
    184					FW_WR_COMPL_V(compl) |
    185					FW_WR_IMMDLEN_V(immlen));
    186	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
    187					FW_WR_LEN16_V(credits));
    188	req->plen = htonl(len);
    189	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
    190				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
    191
    192	req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
    193				     FW_OFLD_TX_DATA_WR_SHOVE_F);
    194}
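/*
 * FW_WR_LEN16_V() expresses the work request length in 16-byte units,
 * i.e. one transmit credit; the caller passes the same value in
 * "credits" that it debits from csk->wr_cred.
 */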
    195
    196static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
    197{
    198	kfree_skb(skb);
    199}
    200
    201void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
    202{
    203	struct sk_buff *skb;
    204
    205	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
    206		u32 dlen = skb->len;
    207		u32 len = skb->len;
    208		u32 credits_needed;
    209		u32 compl = 0;
    210		u32 flowclen16 = 0;
    211		u32 iso_cpl_len = 0;
    212
    213		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
    214			iso_cpl_len = sizeof(struct cpl_tx_data_iso);
    215
    216		if (cxgbit_is_ofld_imm(skb))
    217			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
    218		else
    219			credits_needed = DIV_ROUND_UP((8 *
    220					cxgbit_calc_tx_flits_ofld(skb)) +
    221					iso_cpl_len, 16);
    222
    223		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
    224			credits_needed += DIV_ROUND_UP(
    225				sizeof(struct fw_ofld_tx_data_wr), 16);
     226		/*
     227		 * Assumes that the initial credits are large enough to cover
     228		 * the fw_flowc_wr plus the largest possible first payload.
     229		 */
    230
    231		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
    232			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
    233			csk->wr_cred -= flowclen16;
    234			csk->wr_una_cred += flowclen16;
    235		}
    236
    237		if (csk->wr_cred < credits_needed) {
     238			pr_debug("csk 0x%p, skb %u/%u, wr %u < %u.\n",
    239				 csk, skb->len, skb->data_len,
    240				 credits_needed, csk->wr_cred);
    241			break;
    242		}
    243		__skb_unlink(skb, &csk->txq);
    244		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
    245		skb->csum = (__force __wsum)(credits_needed + flowclen16);
    246		csk->wr_cred -= credits_needed;
    247		csk->wr_una_cred += credits_needed;
    248
     249		pr_debug("csk 0x%p, skb %u/%u, wr %u, left %u, unack %u.\n",
    250			 csk, skb->len, skb->data_len, credits_needed,
    251			 csk->wr_cred, csk->wr_una_cred);
    252
    253		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
    254			len += cxgbit_skcb_tx_extralen(skb);
    255
    256			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
    257			    (!before(csk->write_seq,
    258				     csk->snd_una + csk->snd_win))) {
    259				compl = 1;
    260				csk->wr_una_cred = 0;
    261			}
    262
    263			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
    264					  compl);
    265			csk->snd_nxt += len;
    266
    267		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
    268			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
    269			struct cpl_close_con_req *req =
    270				(struct cpl_close_con_req *)skb->data;
    271			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
    272			csk->wr_una_cred = 0;
    273		}
    274
    275		cxgbit_sock_enqueue_wr(csk, skb);
    276		t4_set_arp_err_handler(skb, csk,
    277				       cxgbit_arp_failure_skb_discard);
    278
    279		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
    280			 csk, csk->tid, skb, len);
    281
    282		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
    283	}
    284}
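/*
 * Credit accounting recap: each skb is charged ceil(WR bytes / 16)
 * credits, the first transmission also pays for the fw_flowc_wr, and
 * the charge is stashed in skb->csum so it can be reclaimed when the
 * hardware acknowledges the WR. A completion is requested once half of
 * wr_max_cred is unacknowledged or the send window fills up.
 */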
    285
    286static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
    287{
    288	struct sk_buff_head backlogq;
    289	struct sk_buff *skb;
    290	void (*fn)(struct cxgbit_sock *, struct sk_buff *);
    291
    292	skb_queue_head_init(&backlogq);
    293
    294	spin_lock_bh(&csk->lock);
    295	while (skb_queue_len(&csk->backlogq)) {
    296		skb_queue_splice_init(&csk->backlogq, &backlogq);
    297		spin_unlock_bh(&csk->lock);
    298
    299		while ((skb = __skb_dequeue(&backlogq))) {
    300			fn = cxgbit_skcb_rx_backlog_fn(skb);
    301			fn(csk, skb);
    302		}
    303
    304		spin_lock_bh(&csk->lock);
    305	}
    306
    307	csk->lock_owner = false;
    308	spin_unlock_bh(&csk->lock);
    309}
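/*
 * This mirrors the core socket-lock backlog pattern: while lock_owner
 * is set, the receive side queues work on csk->backlogq instead of
 * running it, and the owner drains that queue (with the lock dropped)
 * before giving up ownership.
 */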
    310
    311static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
    312{
    313	int ret = 0;
    314
    315	spin_lock_bh(&csk->lock);
    316	csk->lock_owner = true;
    317	spin_unlock_bh(&csk->lock);
    318
    319	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
    320		     signal_pending(current))) {
    321		__kfree_skb(skb);
    322		__skb_queue_purge(&csk->ppodq);
    323		ret = -1;
    324		goto unlock;
    325	}
    326
    327	csk->write_seq += skb->len +
    328			  cxgbit_skcb_tx_extralen(skb);
    329
    330	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
    331	__skb_queue_tail(&csk->txq, skb);
    332	cxgbit_push_tx_frames(csk);
    333
    334unlock:
    335	cxgbit_unlock_sock(csk);
    336	return ret;
    337}
    338
    339static int
    340cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset,
    341	       u32 data_length)
    342{
    343	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
    344	u32 padding = ((-data_length) & 3);
    345	struct scatterlist *sg;
    346	struct page *page;
    347	unsigned int page_off;
    348
    349	if (padding)
    350		nr_frags--;
    351
    352	/*
    353	 * We know each entry in t_data_sg contains a page.
    354	 */
    355	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
    356	page_off = (data_offset % PAGE_SIZE);
    357
    358	while (data_length && (i < nr_frags)) {
    359		u32 cur_len = min_t(u32, data_length, sg->length - page_off);
    360
    361		page = sg_page(sg);
    362
    363		get_page(page);
    364		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
    365				   cur_len);
    366		skb->data_len += cur_len;
    367		skb->len += cur_len;
    368		skb->truesize += cur_len;
    369
    370		data_length -= cur_len;
    371		page_off = 0;
    372		sg = sg_next(sg);
    373		i++;
    374	}
    375
    376	if (data_length)
    377		return -1;
    378
    379	if (padding) {
    380		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    381		if (!page)
    382			return -1;
    383		skb_fill_page_desc(skb, i, page, 0, padding);
    384		skb->data_len += padding;
    385		skb->len += padding;
    386		skb->truesize += padding;
    387	}
    388
    389	return 0;
    390}
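/*
 * Padding example: iSCSI data segments are padded to a 4-byte boundary
 * and ((-data_length) & 3) is exactly that pad, e.g. 1003 bytes -> 1
 * pad byte, 1004 bytes -> 0; the pad is sent from a separate zeroed
 * page so the data pages themselves stay untouched.
 */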
    391
    392static int
    393cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
    394		     struct iscsi_datain_req *dr)
    395{
    396	struct iscsit_conn *conn = csk->conn;
    397	struct sk_buff *skb;
    398	struct iscsi_datain datain;
    399	struct cxgbit_iso_info iso_info;
    400	u32 data_length = cmd->se_cmd.data_length;
    401	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
    402	u32 num_pdu, plen, tx_data = 0;
    403	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
    404		SCF_TRANSPORT_TASK_SENSE);
    405	bool set_statsn = false;
    406	int ret = -1;
    407
    408	while (data_length) {
    409		num_pdu = (data_length + mrdsl - 1) / mrdsl;
    410		if (num_pdu > csk->max_iso_npdu)
    411			num_pdu = csk->max_iso_npdu;
    412
    413		plen = num_pdu * mrdsl;
    414		if (plen > data_length)
    415			plen = data_length;
    416
    417		skb = __cxgbit_alloc_skb(csk, 0, true);
    418		if (unlikely(!skb))
    419			return -ENOMEM;
    420
    421		memset(skb->data, 0, ISCSI_HDR_LEN);
    422		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
    423		cxgbit_skcb_submode(skb) |= (csk->submode &
    424				CXGBIT_SUBMODE_DCRC);
    425		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
    426				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
    427						((num_pdu - 1) * ISCSI_HDR_LEN);
    428
    429		memset(&datain, 0, sizeof(struct iscsi_datain));
    430		memset(&iso_info, 0, sizeof(iso_info));
    431
    432		if (!tx_data)
    433			iso_info.flags |= CXGBIT_ISO_FSLICE;
    434
    435		if (!(data_length - plen)) {
    436			iso_info.flags |= CXGBIT_ISO_LSLICE;
    437			if (!task_sense) {
    438				datain.flags = ISCSI_FLAG_DATA_STATUS;
    439				iscsit_increment_maxcmdsn(cmd, conn->sess);
    440				cmd->stat_sn = conn->stat_sn++;
    441				set_statsn = true;
    442			}
    443		}
    444
    445		iso_info.burst_len = num_pdu * mrdsl;
    446		iso_info.mpdu = mrdsl;
    447		iso_info.len = ISCSI_HDR_LEN + plen;
    448
    449		cxgbit_cpl_tx_data_iso(skb, &iso_info);
    450
    451		datain.offset = tx_data;
    452		datain.data_sn = cmd->data_sn - 1;
    453
    454		iscsit_build_datain_pdu(cmd, conn, &datain,
    455					(struct iscsi_data_rsp *)skb->data,
    456					set_statsn);
    457
    458		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
    459		if (unlikely(ret)) {
    460			__kfree_skb(skb);
    461			goto out;
    462		}
    463
    464		ret = cxgbit_queue_skb(csk, skb);
    465		if (unlikely(ret))
    466			goto out;
    467
    468		tx_data += plen;
    469		data_length -= plen;
    470
    471		cmd->read_data_done += plen;
    472		cmd->data_sn += num_pdu;
    473	}
    474
    475	dr->dr_complete = DATAIN_COMPLETE_NORMAL;
    476
    477	return 0;
    478
    479out:
    480	return ret;
    481}
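/*
 * ISO example (illustrative numbers): with mrdsl = 8192 and
 * csk->max_iso_npdu = 4, a 100 KiB Data-In is sent as bursts of four
 * PDUs (32 KiB of payload per work request); the adapter slices each
 * burst into individual PDUs, so software builds only the header of
 * the first PDU of every burst.
 */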
    482
    483static int
    484cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
    485		 const struct iscsi_datain *datain)
    486{
    487	struct sk_buff *skb;
    488	int ret = 0;
    489
    490	skb = cxgbit_alloc_skb(csk, 0);
    491	if (unlikely(!skb))
    492		return -ENOMEM;
    493
    494	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
    495
    496	if (datain->length) {
    497		cxgbit_skcb_submode(skb) |= (csk->submode &
    498				CXGBIT_SUBMODE_DCRC);
    499		cxgbit_skcb_tx_extralen(skb) =
    500				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
    501	}
    502
    503	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
    504	if (ret < 0) {
    505		__kfree_skb(skb);
    506		return ret;
    507	}
    508
    509	return cxgbit_queue_skb(csk, skb);
    510}
    511
    512static int
    513cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
    514		       struct iscsi_datain_req *dr,
    515		       const struct iscsi_datain *datain)
    516{
    517	struct cxgbit_sock *csk = conn->context;
    518	u32 data_length = cmd->se_cmd.data_length;
    519	u32 padding = ((-data_length) & 3);
    520	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
    521
    522	if ((data_length > mrdsl) && (!dr->recovery) &&
    523	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
    524		atomic_long_add(data_length - datain->length,
    525				&conn->sess->tx_data_octets);
    526		return cxgbit_tx_datain_iso(csk, cmd, dr);
    527	}
    528
    529	return cxgbit_tx_datain(csk, cmd, datain);
    530}
    531
    532static int
    533cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
    534			  const void *data_buf, u32 data_buf_len)
    535{
    536	struct cxgbit_sock *csk = conn->context;
    537	struct sk_buff *skb;
    538	u32 padding = ((-data_buf_len) & 3);
    539
    540	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
    541	if (unlikely(!skb))
    542		return -ENOMEM;
    543
    544	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
    545
    546	if (data_buf_len) {
    547		u32 pad_bytes = 0;
    548
    549		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
    550
    551		if (padding)
    552			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
    553				       &pad_bytes, padding);
    554	}
    555
    556	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
    557				       cxgbit_skcb_submode(skb)];
    558
    559	return cxgbit_queue_skb(csk, skb);
    560}
    561
    562int
    563cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
    564		struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
    565{
    566	if (dr)
    567		return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
    568	else
    569		return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
    570}
    571
    572int cxgbit_validate_params(struct iscsit_conn *conn)
    573{
    574	struct cxgbit_sock *csk = conn->context;
    575	struct cxgbit_device *cdev = csk->com.cdev;
    576	struct iscsi_param *param;
    577	u32 max_xmitdsl;
    578
    579	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
    580					  conn->param_list);
    581	if (!param)
    582		return -1;
    583
    584	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
    585		return -1;
    586
    587	if (max_xmitdsl > cdev->mdsl) {
    588		if (iscsi_change_param_sprintf(
    589			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
    590			return -1;
    591	}
    592
    593	return 0;
    594}
    595
    596static int cxgbit_set_digest(struct cxgbit_sock *csk)
    597{
    598	struct iscsit_conn *conn = csk->conn;
    599	struct iscsi_param *param;
    600
    601	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
    602	if (!param) {
    603		pr_err("param not found key %s\n", HEADERDIGEST);
    604		return -1;
    605	}
    606
    607	if (!strcmp(param->value, CRC32C))
    608		csk->submode |= CXGBIT_SUBMODE_HCRC;
    609
    610	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
    611	if (!param) {
    612		csk->submode = 0;
    613		pr_err("param not found key %s\n", DATADIGEST);
    614		return -1;
    615	}
    616
    617	if (!strcmp(param->value, CRC32C))
    618		csk->submode |= CXGBIT_SUBMODE_DCRC;
    619
    620	if (cxgbit_setup_conn_digest(csk)) {
    621		csk->submode = 0;
    622		return -1;
    623	}
    624
    625	return 0;
    626}
    627
    628static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
    629{
    630	struct iscsit_conn *conn = csk->conn;
    631	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
    632	struct iscsi_param *param;
    633	u32 mrdsl, mbl;
    634	u32 max_npdu, max_iso_npdu;
    635	u32 max_iso_payload;
    636
    637	if (conn->login->leading_connection) {
    638		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
    639						  conn->param_list);
    640		if (!param) {
    641			pr_err("param not found key %s\n", MAXBURSTLENGTH);
    642			return -1;
    643		}
    644
    645		if (kstrtou32(param->value, 0, &mbl) < 0)
    646			return -1;
    647	} else {
    648		mbl = conn->sess->sess_ops->MaxBurstLength;
    649	}
    650
    651	mrdsl = conn_ops->MaxRecvDataSegmentLength;
    652	max_npdu = mbl / mrdsl;
    653
    654	max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
    655
    656	max_iso_npdu = max_iso_payload /
    657		       (ISCSI_HDR_LEN + mrdsl +
    658			cxgbit_digest_len[csk->submode]);
    659
    660	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
    661
    662	if (csk->max_iso_npdu <= 1)
    663		csk->max_iso_npdu = 0;
    664
    665	return 0;
    666}
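/*
 * Example (illustrative numbers): MaxBurstLength = 262144 and
 * mrdsl = 8192 give max_npdu = 32; if the rounded-down ISO payload
 * budget were 62264 bytes with both digests enabled (8 bytes total),
 * max_iso_npdu = 62264 / (48 + 8192 + 8) = 7, so the connection would
 * use 7 PDUs per ISO burst.
 */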
    667
    668/*
    669 * cxgbit_seq_pdu_inorder()
    670 * @csk: pointer to cxgbit socket structure
    671 *
    672 * This function checks whether data sequence and data
    673 * pdu are in order.
    674 *
    675 * Return: returns -1 on error, 0 if data sequence and
    676 * data pdu are in order, 1 if data sequence or data pdu
    677 * is not in order.
    678 */
    679static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
    680{
    681	struct iscsit_conn *conn = csk->conn;
    682	struct iscsi_param *param;
    683
    684	if (conn->login->leading_connection) {
    685		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
    686						  conn->param_list);
    687		if (!param) {
    688			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
    689			return -1;
    690		}
    691
    692		if (strcmp(param->value, YES))
    693			return 1;
    694
    695		param = iscsi_find_param_from_key(DATAPDUINORDER,
    696						  conn->param_list);
    697		if (!param) {
    698			pr_err("param not found key %s\n", DATAPDUINORDER);
    699			return -1;
    700		}
    701
    702		if (strcmp(param->value, YES))
    703			return 1;
    704
    705	} else {
    706		if (!conn->sess->sess_ops->DataSequenceInOrder)
    707			return 1;
    708		if (!conn->sess->sess_ops->DataPDUInOrder)
    709			return 1;
    710	}
    711
    712	return 0;
    713}
    714
    715static int cxgbit_set_params(struct iscsit_conn *conn)
    716{
    717	struct cxgbit_sock *csk = conn->context;
    718	struct cxgbit_device *cdev = csk->com.cdev;
    719	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
    720	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
    721	struct iscsi_param *param;
    722	u8 erl;
    723
    724	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
    725		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
    726
    727	if (cxgbit_set_digest(csk))
    728		return -1;
    729
    730	if (conn->login->leading_connection) {
    731		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
    732						  conn->param_list);
    733		if (!param) {
    734			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
    735			return -1;
    736		}
    737		if (kstrtou8(param->value, 0, &erl) < 0)
    738			return -1;
    739	} else {
    740		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
    741	}
    742
    743	if (!erl) {
    744		int ret;
    745
    746		ret = cxgbit_seq_pdu_inorder(csk);
    747		if (ret < 0) {
    748			return -1;
    749		} else if (ret > 0) {
    750			if (is_t5(cdev->lldi.adapter_type))
    751				goto enable_ddp;
    752			else
    753				return 0;
    754		}
    755
    756		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
    757			if (cxgbit_set_iso_npdu(csk))
    758				return -1;
    759		}
    760
    761enable_ddp:
    762		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
    763			if (cxgbit_setup_conn_pgidx(csk,
    764						    ppm->tformat.pgsz_idx_dflt))
    765				return -1;
    766			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
    767		}
    768	}
    769
    770	return 0;
    771}
    772
    773int
    774cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
    775		    u32 length)
    776{
    777	struct cxgbit_sock *csk = conn->context;
    778	struct sk_buff *skb;
    779	u32 padding_buf = 0;
    780	u8 padding = ((-length) & 3);
    781
    782	skb = cxgbit_alloc_skb(csk, length + padding);
    783	if (!skb)
    784		return -ENOMEM;
    785	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
    786	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
    787
    788	if (padding)
    789		skb_store_bits(skb, ISCSI_HDR_LEN + length,
    790			       &padding_buf, padding);
    791
    792	if (login->login_complete) {
    793		if (cxgbit_set_params(conn)) {
    794			kfree_skb(skb);
    795			return -1;
    796		}
    797
    798		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
    799	}
    800
    801	if (cxgbit_queue_skb(csk, skb))
    802		return -1;
    803
    804	if ((!login->login_complete) && (!login->login_failed))
    805		schedule_delayed_work(&conn->login_work, 0);
    806
    807	return 0;
    808}
    809
    810static void
    811cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
    812		      unsigned int nents, u32 skip)
    813{
    814	struct skb_seq_state st;
    815	const u8 *buf;
    816	unsigned int consumed = 0, buf_len;
    817	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
    818
    819	skb_prepare_seq_read(skb, pdu_cb->doffset,
    820			     pdu_cb->doffset + pdu_cb->dlen,
    821			     &st);
    822
    823	while (true) {
    824		buf_len = skb_seq_read(consumed, &buf, &st);
    825		if (!buf_len) {
    826			skb_abort_seq_read(&st);
    827			break;
    828		}
    829
    830		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
    831						 buf_len, skip + consumed);
    832	}
    833}
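/*
 * skb_seq_read() iterates the skb (linear area and page frags) without
 * linearizing it; each chunk returned is copied into the scatterlist at
 * the running offset, so this works for arbitrarily fragmented LRO skbs.
 */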
    834
    835static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
    836{
    837	struct iscsit_conn *conn = csk->conn;
    838	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
    839	struct cxgbit_cmd *ccmd;
    840	struct iscsit_cmd *cmd;
    841
    842	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
    843	if (!cmd) {
    844		pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n");
    845		return NULL;
    846	}
    847
    848	ccmd = iscsit_priv_cmd(cmd);
    849	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
    850	ccmd->setup_ddp = true;
    851
    852	return cmd;
    853}
    854
    855static int
    856cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
    857			     u32 length)
    858{
    859	struct iscsit_conn *conn = cmd->conn;
    860	struct cxgbit_sock *csk = conn->context;
    861	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
    862
    863	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
    864		pr_err("ImmediateData CRC32C DataDigest error\n");
    865		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
    866			pr_err("Unable to recover from"
    867			       " Immediate Data digest failure while"
    868			       " in ERL=0.\n");
    869			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
    870					  (unsigned char *)hdr);
    871			return IMMEDIATE_DATA_CANNOT_RECOVER;
    872		}
    873
    874		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
    875				  (unsigned char *)hdr);
    876		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
    877	}
    878
    879	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
    880		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
    881		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
    882		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
    883
    884		sg_init_table(&ccmd->sg, 1);
    885		sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
    886				skb_frag_size(dfrag), skb_frag_off(dfrag));
    887		get_page(skb_frag_page(dfrag));
    888
    889		cmd->se_cmd.t_data_sg = &ccmd->sg;
    890		cmd->se_cmd.t_data_nents = 1;
    891
    892		ccmd->release = true;
    893	} else {
    894		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
    895		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
    896
    897		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
    898	}
    899
    900	cmd->write_data_done += pdu_cb->dlen;
    901
    902	if (cmd->write_data_done == cmd->se_cmd.data_length) {
    903		spin_lock_bh(&cmd->istate_lock);
    904		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
    905		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
    906		spin_unlock_bh(&cmd->istate_lock);
    907	}
    908
    909	return IMMEDIATE_DATA_NORMAL_OPERATION;
    910}
    911
    912static int
    913cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
    914			  bool dump_payload)
    915{
    916	struct iscsit_conn *conn = cmd->conn;
    917	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
    918	/*
    919	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
    920	 */
    921	if (dump_payload)
    922		goto after_immediate_data;
    923
    924	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
    925						 cmd->first_burst_len);
    926after_immediate_data:
    927	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
    928		/*
    929		 * A PDU/CmdSN carrying Immediate Data passed
    930		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
    931		 * Immediate Bit is not set.
    932		 */
    933		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
    934						(unsigned char *)hdr,
    935						hdr->cmdsn);
    936		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
    937			return -1;
    938
    939		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
    940			target_put_sess_cmd(&cmd->se_cmd);
    941			return 0;
    942		} else if (cmd->unsolicited_data) {
    943			iscsit_set_unsolicited_dataout(cmd);
    944		}
    945
    946	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
    947		/*
    948		 * Immediate Data failed DataCRC and ERL>=1,
    949		 * silently drop this PDU and let the initiator
    950		 * plug the CmdSN gap.
    951		 *
    952		 * FIXME: Send Unsolicited NOPIN with reserved
    953		 * TTT here to help the initiator figure out
    954		 * the missing CmdSN, although they should be
    955		 * intelligent enough to determine the missing
    956		 * CmdSN and issue a retry to plug the sequence.
    957		 */
    958		cmd->i_state = ISTATE_REMOVE;
    959		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
    960	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
    961		return -1;
    962
    963	return 0;
    964}
    965
    966static int
    967cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
    968{
    969	struct iscsit_conn *conn = csk->conn;
    970	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
    971	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
    972	int rc;
    973	bool dump_payload = false;
    974
    975	rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
    976	if (rc < 0)
    977		return rc;
    978
    979	if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
    980	    (pdu_cb->nr_dfrags == 1))
    981		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
    982
    983	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
    984	if (rc < 0)
    985		return 0;
    986	else if (rc > 0)
    987		dump_payload = true;
    988
    989	if (!pdu_cb->dlen)
    990		return 0;
    991
    992	return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
    993}
    994
    995static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
    996{
    997	struct scatterlist *sg_start;
    998	struct iscsit_conn *conn = csk->conn;
    999	struct iscsit_cmd *cmd = NULL;
   1000	struct cxgbit_cmd *ccmd;
   1001	struct cxgbi_task_tag_info *ttinfo;
   1002	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1003	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
   1004	u32 data_offset = be32_to_cpu(hdr->offset);
   1005	u32 data_len = ntoh24(hdr->dlength);
   1006	int rc, sg_nents, sg_off;
   1007	bool dcrc_err = false;
   1008
   1009	if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
   1010		u32 offset = be32_to_cpu(hdr->offset);
   1011		u32 ddp_data_len;
   1012		bool success = false;
   1013
   1014		cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
   1015		if (!cmd)
   1016			return 0;
   1017
   1018		ddp_data_len = offset - cmd->write_data_done;
   1019		atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
   1020
   1021		cmd->write_data_done = offset;
   1022		cmd->next_burst_len = ddp_data_len;
   1023		cmd->data_sn = be32_to_cpu(hdr->datasn);
   1024
   1025		rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
   1026						cmd, data_len, &success);
   1027		if (rc < 0)
   1028			return rc;
   1029		else if (!success)
   1030			return 0;
   1031	} else {
   1032		rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
   1033		if (rc < 0)
   1034			return rc;
   1035		else if (!cmd)
   1036			return 0;
   1037	}
   1038
   1039	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
   1040		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
   1041		       " DataSN: 0x%08x\n",
   1042		       hdr->itt, hdr->offset, data_len,
   1043		       hdr->datasn);
   1044
   1045		dcrc_err = true;
   1046		goto check_payload;
   1047	}
   1048
   1049	pr_debug("DataOut data_len: %u, "
   1050		"write_data_done: %u, data_length: %u\n",
   1051		  data_len,  cmd->write_data_done,
   1052		  cmd->se_cmd.data_length);
   1053
   1054	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
   1055		u32 skip = data_offset % PAGE_SIZE;
   1056
   1057		sg_off = data_offset / PAGE_SIZE;
   1058		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
   1059		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
   1060
   1061		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
   1062	}
   1063
   1064	ccmd = iscsit_priv_cmd(cmd);
   1065	ttinfo = &ccmd->ttinfo;
   1066
   1067	if (ccmd->release && ttinfo->sgl &&
   1068	    (cmd->se_cmd.data_length ==	(cmd->write_data_done + data_len))) {
   1069		struct cxgbit_device *cdev = csk->com.cdev;
   1070		struct cxgbi_ppm *ppm = cdev2ppm(cdev);
   1071
   1072		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
   1073			     DMA_FROM_DEVICE);
   1074		ttinfo->nents = 0;
   1075		ttinfo->sgl = NULL;
   1076	}
   1077
   1078check_payload:
   1079
   1080	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
   1081	if (rc < 0)
   1082		return rc;
   1083
   1084	return 0;
   1085}
   1086
   1087static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
   1088{
   1089	struct iscsit_conn *conn = csk->conn;
   1090	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1091	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
   1092	unsigned char *ping_data = NULL;
   1093	u32 payload_length = pdu_cb->dlen;
   1094	int ret;
   1095
   1096	ret = iscsit_setup_nop_out(conn, cmd, hdr);
   1097	if (ret < 0)
   1098		return 0;
   1099
   1100	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
   1101		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
   1102			pr_err("Unable to recover from"
   1103			       " NOPOUT Ping DataCRC failure while in"
   1104			       " ERL=0.\n");
   1105			ret = -1;
   1106			goto out;
   1107		} else {
   1108			/*
   1109			 * drop this PDU and let the
   1110			 * initiator plug the CmdSN gap.
   1111			 */
   1112			pr_info("Dropping NOPOUT"
   1113				" Command CmdSN: 0x%08x due to"
   1114				" DataCRC error.\n", hdr->cmdsn);
   1115			ret = 0;
   1116			goto out;
   1117		}
   1118	}
   1119
   1120	/*
   1121	 * Handle NOP-OUT payload for traditional iSCSI sockets
   1122	 */
   1123	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
   1124		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
   1125		if (!ping_data) {
   1126			pr_err("Unable to allocate memory for"
   1127				" NOPOUT ping data.\n");
   1128			ret = -1;
   1129			goto out;
   1130		}
   1131
   1132		skb_copy_bits(csk->skb, pdu_cb->doffset,
   1133			      ping_data, payload_length);
   1134
   1135		ping_data[payload_length] = '\0';
   1136		/*
   1137		 * Attach ping data to struct iscsit_cmd->buf_ptr.
   1138		 */
   1139		cmd->buf_ptr = ping_data;
   1140		cmd->buf_ptr_size = payload_length;
   1141
   1142		pr_debug("Got %u bytes of NOPOUT ping"
   1143			" data.\n", payload_length);
   1144		pr_debug("Ping Data: \"%s\"\n", ping_data);
   1145	}
   1146
   1147	return iscsit_process_nop_out(conn, cmd, hdr);
   1148out:
   1149	if (cmd)
   1150		iscsit_free_cmd(cmd, false);
   1151	return ret;
   1152}
   1153
   1154static int
   1155cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
   1156{
   1157	struct iscsit_conn *conn = csk->conn;
   1158	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1159	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
   1160	u32 payload_length = pdu_cb->dlen;
   1161	int rc;
   1162	unsigned char *text_in = NULL;
   1163
   1164	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
   1165	if (rc < 0)
   1166		return rc;
   1167
   1168	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
   1169		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
   1170			pr_err("Unable to recover from"
   1171			       " Text Data digest failure while in"
   1172			       " ERL=0.\n");
   1173			goto reject;
   1174		} else {
   1175			/*
   1176			 * drop this PDU and let the
   1177			 * initiator plug the CmdSN gap.
   1178			 */
   1179			pr_info("Dropping Text"
   1180				" Command CmdSN: 0x%08x due to"
   1181				" DataCRC error.\n", hdr->cmdsn);
   1182			return 0;
   1183		}
   1184	}
   1185
   1186	if (payload_length) {
   1187		text_in = kzalloc(payload_length, GFP_KERNEL);
   1188		if (!text_in) {
   1189			pr_err("Unable to allocate text_in of payload_length: %u\n",
   1190			       payload_length);
   1191			return -ENOMEM;
   1192		}
   1193		skb_copy_bits(csk->skb, pdu_cb->doffset,
   1194			      text_in, payload_length);
   1195
   1196		text_in[payload_length - 1] = '\0';
   1197
   1198		cmd->text_in_ptr = text_in;
   1199	}
   1200
   1201	return iscsit_process_text_cmd(conn, cmd, hdr);
   1202
   1203reject:
   1204	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
   1205				 pdu_cb->hdr);
   1206}
   1207
   1208static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
   1209{
   1210	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1211	struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
   1212	struct iscsit_conn *conn = csk->conn;
   1213	struct iscsit_cmd *cmd = NULL;
   1214	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
   1215	int ret = -EINVAL;
   1216
   1217	switch (opcode) {
   1218	case ISCSI_OP_SCSI_CMD:
   1219		cmd = cxgbit_allocate_cmd(csk);
   1220		if (!cmd)
   1221			goto reject;
   1222
   1223		ret = cxgbit_handle_scsi_cmd(csk, cmd);
   1224		break;
   1225	case ISCSI_OP_SCSI_DATA_OUT:
   1226		ret = cxgbit_handle_iscsi_dataout(csk);
   1227		break;
   1228	case ISCSI_OP_NOOP_OUT:
   1229		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
   1230			cmd = cxgbit_allocate_cmd(csk);
   1231			if (!cmd)
   1232				goto reject;
   1233		}
   1234
   1235		ret = cxgbit_handle_nop_out(csk, cmd);
   1236		break;
   1237	case ISCSI_OP_SCSI_TMFUNC:
   1238		cmd = cxgbit_allocate_cmd(csk);
   1239		if (!cmd)
   1240			goto reject;
   1241
   1242		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
   1243						 (unsigned char *)hdr);
   1244		break;
   1245	case ISCSI_OP_TEXT:
   1246		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
   1247			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
   1248			if (!cmd)
   1249				goto reject;
   1250		} else {
   1251			cmd = cxgbit_allocate_cmd(csk);
   1252			if (!cmd)
   1253				goto reject;
   1254		}
   1255
   1256		ret = cxgbit_handle_text_cmd(csk, cmd);
   1257		break;
   1258	case ISCSI_OP_LOGOUT:
   1259		cmd = cxgbit_allocate_cmd(csk);
   1260		if (!cmd)
   1261			goto reject;
   1262
   1263		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
   1264		if (ret > 0)
   1265			wait_for_completion_timeout(&conn->conn_logout_comp,
   1266						    SECONDS_FOR_LOGOUT_COMP
   1267						    * HZ);
   1268		break;
   1269	case ISCSI_OP_SNACK:
   1270		ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
   1271		break;
   1272	default:
   1273		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
   1274		dump_stack();
   1275		break;
   1276	}
   1277
   1278	return ret;
   1279
   1280reject:
    1281	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
    1282				 (unsigned char *)hdr);
   1284}
   1285
   1286static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
   1287{
   1288	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1289	struct iscsit_conn *conn = csk->conn;
   1290	struct iscsi_hdr *hdr = pdu_cb->hdr;
   1291	u8 opcode;
   1292
   1293	if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
   1294		atomic_long_inc(&conn->sess->conn_digest_errors);
   1295		goto transport_err;
   1296	}
   1297
   1298	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
   1299		goto transport_err;
   1300
   1301	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
   1302
   1303	if (conn->sess->sess_ops->SessionType &&
   1304	    ((!(opcode & ISCSI_OP_TEXT)) ||
   1305	     (!(opcode & ISCSI_OP_LOGOUT)))) {
   1306		pr_err("Received illegal iSCSI Opcode: 0x%02x"
   1307			" while in Discovery Session, rejecting.\n", opcode);
   1308		iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
   1309				  (unsigned char *)hdr);
   1310		goto transport_err;
   1311	}
   1312
   1313	if (cxgbit_target_rx_opcode(csk) < 0)
   1314		goto transport_err;
   1315
   1316	return 0;
   1317
   1318transport_err:
   1319	return -1;
   1320}
   1321
   1322static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
   1323{
   1324	struct iscsit_conn *conn = csk->conn;
   1325	struct iscsi_login *login = conn->login;
   1326	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
   1327	struct iscsi_login_req *login_req;
   1328
   1329	login_req = (struct iscsi_login_req *)login->req;
   1330	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
   1331
   1332	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
   1333		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
   1334		login_req->flags, login_req->itt, login_req->cmdsn,
   1335		login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
   1336	/*
   1337	 * Setup the initial iscsi_login values from the leading
   1338	 * login request PDU.
   1339	 */
   1340	if (login->first_request) {
   1341		login_req = (struct iscsi_login_req *)login->req;
   1342		login->leading_connection = (!login_req->tsih) ? 1 : 0;
   1343		login->current_stage	= ISCSI_LOGIN_CURRENT_STAGE(
   1344				login_req->flags);
   1345		login->version_min	= login_req->min_version;
   1346		login->version_max	= login_req->max_version;
   1347		memcpy(login->isid, login_req->isid, 6);
   1348		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
   1349		login->init_task_tag	= login_req->itt;
   1350		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
   1351		login->cid		= be16_to_cpu(login_req->cid);
   1352		login->tsih		= be16_to_cpu(login_req->tsih);
   1353	}
   1354
   1355	if (iscsi_target_check_login_request(conn, login) < 0)
   1356		return -1;
   1357
   1358	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
   1359	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
   1360
   1361	return 0;
   1362}
   1363
   1364static int
   1365cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
   1366{
   1367	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
   1368	int ret;
   1369
   1370	cxgbit_rx_pdu_cb(skb) = pdu_cb;
   1371
   1372	csk->skb = skb;
   1373
   1374	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
   1375		ret = cxgbit_rx_login_pdu(csk);
   1376		set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
   1377	} else {
   1378		ret = cxgbit_rx_opcode(csk);
   1379	}
   1380
   1381	return ret;
   1382}
   1383
   1384static void cxgbit_lro_skb_dump(struct sk_buff *skb)
   1385{
   1386	struct skb_shared_info *ssi = skb_shinfo(skb);
   1387	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
   1388	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
   1389	u8 i;
   1390
   1391	pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
   1392		skb, skb->head, skb->data, skb->len, skb->data_len,
   1393		ssi->nr_frags);
   1394	pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
   1395		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
   1396
   1397	for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
   1398		pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
   1399			"frags %u.\n",
   1400			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
   1401			pdu_cb->ddigest, pdu_cb->frags);
   1402	for (i = 0; i < ssi->nr_frags; i++)
   1403		pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
   1404			skb, i, skb_frag_off(&ssi->frags[i]),
   1405			skb_frag_size(&ssi->frags[i]));
   1406}
   1407
   1408static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
   1409{
   1410	struct sk_buff *skb = csk->lro_hskb;
   1411	struct skb_shared_info *ssi = skb_shinfo(skb);
   1412	u8 i;
   1413
   1414	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
   1415	for (i = 0; i < ssi->nr_frags; i++)
   1416		put_page(skb_frag_page(&ssi->frags[i]));
   1417	ssi->nr_frags = 0;
   1418	skb->data_len = 0;
   1419	skb->truesize -= skb->len;
   1420	skb->len = 0;
   1421}
   1422
   1423static void
   1424cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
   1425{
   1426	struct sk_buff *hskb = csk->lro_hskb;
   1427	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
   1428	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
   1429	struct skb_shared_info *hssi = skb_shinfo(hskb);
   1430	struct skb_shared_info *ssi = skb_shinfo(skb);
   1431	unsigned int len = 0;
   1432
   1433	if (pdu_cb->flags & PDUCBF_RX_HDR) {
   1434		u8 hfrag_idx = hssi->nr_frags;
   1435
   1436		hpdu_cb->flags |= pdu_cb->flags;
   1437		hpdu_cb->seq = pdu_cb->seq;
   1438		hpdu_cb->hdr = pdu_cb->hdr;
   1439		hpdu_cb->hlen = pdu_cb->hlen;
   1440
   1441		memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
   1442		       sizeof(skb_frag_t));
   1443
   1444		get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
   1445		hssi->nr_frags++;
   1446		hpdu_cb->frags++;
   1447		hpdu_cb->hfrag_idx = hfrag_idx;
   1448
   1449		len = skb_frag_size(&hssi->frags[hfrag_idx]);
   1450		hskb->len += len;
   1451		hskb->data_len += len;
   1452		hskb->truesize += len;
   1453	}
   1454
   1455	if (pdu_cb->flags & PDUCBF_RX_DATA) {
   1456		u8 dfrag_idx = hssi->nr_frags, i;
   1457
   1458		hpdu_cb->flags |= pdu_cb->flags;
   1459		hpdu_cb->dfrag_idx = dfrag_idx;
   1460
   1461		len = 0;
   1462		for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
   1463			memcpy(&hssi->frags[dfrag_idx],
   1464			       &ssi->frags[pdu_cb->dfrag_idx + i],
   1465			       sizeof(skb_frag_t));
   1466
   1467			get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
   1468
   1469			len += skb_frag_size(&hssi->frags[dfrag_idx]);
   1470
   1471			hssi->nr_frags++;
   1472			hpdu_cb->frags++;
   1473		}
   1474
   1475		hpdu_cb->dlen = pdu_cb->dlen;
   1476		hpdu_cb->doffset = hpdu_cb->hlen;
   1477		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
   1478		hskb->len += len;
   1479		hskb->data_len += len;
   1480		hskb->truesize += len;
   1481	}
   1482
   1483	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
   1484		hpdu_cb->flags |= pdu_cb->flags;
   1485
   1486		if (hpdu_cb->flags & PDUCBF_RX_DATA)
   1487			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
   1488
   1489		hpdu_cb->ddigest = pdu_cb->ddigest;
   1490		hpdu_cb->pdulen = pdu_cb->pdulen;
   1491	}
   1492}
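/*
 * A PDU may straddle two LRO skbs (header in one, data or status in the
 * next). The merge above copies the relevant frag descriptors into the
 * per-connection lro_hskb, taking page references so the source skb can
 * be freed, until a PDUCBF_RX_STATUS part completes the PDU.
 */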
   1493
   1494static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
   1495{
   1496	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
   1497	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
   1498	u8 pdu_idx = 0, last_idx = 0;
   1499	int ret = 0;
   1500
   1501	if (!pdu_cb->complete) {
   1502		cxgbit_lro_skb_merge(csk, skb, 0);
   1503
   1504		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
   1505			struct sk_buff *hskb = csk->lro_hskb;
   1506
   1507			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
   1508
   1509			cxgbit_lro_hskb_reset(csk);
   1510
   1511			if (ret < 0)
   1512				goto out;
   1513		}
   1514
   1515		pdu_idx = 1;
   1516	}
   1517
   1518	if (lro_cb->pdu_idx)
   1519		last_idx = lro_cb->pdu_idx - 1;
   1520
   1521	for (; pdu_idx <= last_idx; pdu_idx++) {
   1522		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
   1523		if (ret < 0)
   1524			goto out;
   1525	}
   1526
   1527	if ((!lro_cb->complete) && lro_cb->pdu_idx)
   1528		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
   1529
   1530out:
   1531	return ret;
   1532}
   1533
   1534static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
   1535{
   1536	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
   1537	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
   1538	int ret = -1;
   1539
   1540	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
   1541	    (pdu_cb->seq != csk->rcv_nxt)) {
   1542		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
   1543			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
   1544		cxgbit_lro_skb_dump(skb);
   1545		return ret;
   1546	}
   1547
   1548	csk->rcv_nxt += lro_cb->pdu_totallen;
   1549
   1550	ret = cxgbit_process_lro_skb(csk, skb);
   1551
   1552	csk->rx_credits += lro_cb->pdu_totallen;
   1553
   1554	if (csk->rx_credits >= (csk->rcv_win / 4))
   1555		cxgbit_rx_data_ack(csk);
   1556
   1557	return ret;
   1558}
   1559
   1560static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
   1561{
   1562	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
   1563	int ret;
   1564
   1565	ret = cxgbit_process_lro_skb(csk, skb);
   1566	if (ret)
   1567		return ret;
   1568
   1569	csk->rx_credits += lro_cb->pdu_totallen;
   1570	if (csk->rx_credits >= csk->rcv_win) {
   1571		csk->rx_credits = 0;
   1572		cxgbit_rx_data_ack(csk);
   1573	}
   1574
   1575	return 0;
   1576}
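/*
 * Receive-window accounting: the T5 path acks back credits once a
 * quarter of the receive window has accumulated, while the generic
 * path above waits for the full window and clears the counter before
 * calling cxgbit_rx_data_ack().
 */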
   1577
   1578static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
   1579{
   1580	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
   1581	int ret = -1;
   1582
   1583	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
   1584		if (is_t5(lldi->adapter_type))
   1585			ret = cxgbit_t5_rx_lro_skb(csk, skb);
   1586		else
   1587			ret = cxgbit_rx_lro_skb(csk, skb);
   1588	}
   1589
   1590	__kfree_skb(skb);
   1591	return ret;
   1592}
   1593
   1594static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
   1595{
   1596	spin_lock_bh(&csk->rxq.lock);
   1597	if (skb_queue_len(&csk->rxq)) {
   1598		skb_queue_splice_init(&csk->rxq, rxq);
   1599		spin_unlock_bh(&csk->rxq.lock);
   1600		return true;
   1601	}
   1602	spin_unlock_bh(&csk->rxq.lock);
   1603	return false;
   1604}
   1605
   1606static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
   1607{
   1608	struct sk_buff *skb;
   1609	struct sk_buff_head rxq;
   1610
   1611	skb_queue_head_init(&rxq);
   1612
   1613	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
   1614
   1615	if (signal_pending(current))
   1616		goto out;
   1617
   1618	while ((skb = __skb_dequeue(&rxq))) {
   1619		if (cxgbit_rx_skb(csk, skb))
   1620			goto out;
   1621	}
   1622
   1623	return 0;
   1624out:
   1625	__skb_queue_purge(&rxq);
   1626	return -1;
   1627}
   1628
   1629int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
   1630{
   1631	struct cxgbit_sock *csk = conn->context;
   1632	int ret = -1;
   1633
   1634	while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
   1635		ret = cxgbit_wait_rxq(csk);
   1636		if (ret) {
   1637			clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
   1638			break;
   1639		}
   1640	}
   1641
   1642	return ret;
   1643}
   1644
   1645void cxgbit_get_rx_pdu(struct iscsit_conn *conn)
   1646{
   1647	struct cxgbit_sock *csk = conn->context;
   1648
   1649	while (!kthread_should_stop()) {
   1650		iscsit_thread_check_cpumask(conn, current, 0);
   1651		if (cxgbit_wait_rxq(csk))
   1652			return;
   1653	}
   1654}